1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
8 #define CPT_IQ_FC_LEN 128
9 #define CPT_IQ_GRP_LEN 16
11 #define CPT_IQ_NB_DESC_MULTIPLIER 40
13 /* The effective queue size to software is (CPT_LF_Q_SIZE[SIZE_DIV40] - 1 - 8).
15 * CPT requires 320 free entries (+8). And 40 entries are required for
16 * allowing CPT to discard packet when the queues are full (+1).
18 #define CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) \
19 (PLT_DIV_CEIL(nb_desc, CPT_IQ_NB_DESC_MULTIPLIER) + 1 + 8)
21 #define CPT_IQ_GRP_SIZE(nb_desc) \
22 (CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) * CPT_IQ_GRP_LEN)
24 #define CPT_LF_MAX_NB_DESC 128000
25 #define CPT_LF_DEFAULT_NB_DESC 1024
28 cpt_lf_misc_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
30 /* Enable all cpt lf error irqs except RQ_DISABLED and CQ_DISABLED */
32 plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
34 lf->rbase + CPT_LF_MISC_INT_ENA_W1S);
36 plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
38 lf->rbase + CPT_LF_MISC_INT_ENA_W1C);
42 cpt_lf_misc_irq(void *param)
44 struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
45 struct dev *dev = lf->dev;
48 intr = plt_read64(lf->rbase + CPT_LF_MISC_INT);
52 plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
55 plt_write64(intr, lf->rbase + CPT_LF_MISC_INT);
59 cpt_lf_register_misc_irq(struct roc_cpt_lf *lf)
61 struct plt_pci_device *pci_dev = lf->pci_dev;
62 struct plt_intr_handle *handle;
65 handle = &pci_dev->intr_handle;
67 vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
68 /* Clear err interrupt */
69 cpt_lf_misc_intr_enb_dis(lf, false);
70 /* Set used interrupt vectors */
71 rc = dev_irq_register(handle, cpt_lf_misc_irq, lf, vec);
72 /* Enable all dev interrupt except for RQ_DISABLED */
73 cpt_lf_misc_intr_enb_dis(lf, true);
79 cpt_lf_unregister_misc_irq(struct roc_cpt_lf *lf)
81 struct plt_pci_device *pci_dev = lf->pci_dev;
82 struct plt_intr_handle *handle;
85 handle = &pci_dev->intr_handle;
87 vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
88 /* Clear err interrupt */
89 cpt_lf_misc_intr_enb_dis(lf, false);
90 dev_irq_unregister(handle, cpt_lf_misc_irq, lf, vec);
94 cpt_lf_done_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
97 plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1S);
99 plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1C);
103 cpt_lf_done_irq(void *param)
105 struct roc_cpt_lf *lf = param;
109 /* Read the number of completed requests */
110 intr = plt_read64(lf->rbase + CPT_LF_DONE);
114 done_wait = plt_read64(lf->rbase + CPT_LF_DONE_WAIT);
116 /* Acknowledge the number of completed requests */
117 plt_write64(intr, lf->rbase + CPT_LF_DONE_ACK);
119 plt_write64(done_wait, lf->rbase + CPT_LF_DONE_WAIT);
123 cpt_lf_register_done_irq(struct roc_cpt_lf *lf)
125 struct plt_pci_device *pci_dev = lf->pci_dev;
126 struct plt_intr_handle *handle;
129 handle = &pci_dev->intr_handle;
131 vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
133 /* Clear done interrupt */
134 cpt_lf_done_intr_enb_dis(lf, false);
136 /* Set used interrupt vectors */
137 rc = dev_irq_register(handle, cpt_lf_done_irq, lf, vec);
139 /* Enable done interrupt */
140 cpt_lf_done_intr_enb_dis(lf, true);
146 cpt_lf_unregister_done_irq(struct roc_cpt_lf *lf)
148 struct plt_pci_device *pci_dev = lf->pci_dev;
149 struct plt_intr_handle *handle;
152 handle = &pci_dev->intr_handle;
154 vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
156 /* Clear done interrupt */
157 cpt_lf_done_intr_enb_dis(lf, false);
158 dev_irq_unregister(handle, cpt_lf_done_irq, lf, vec);
162 cpt_lf_register_irqs(struct roc_cpt_lf *lf)
166 if (lf->msixoff == MSIX_VECTOR_INVALID) {
167 plt_err("Invalid CPTLF MSIX vector offset vector: 0x%x",
172 /* Register lf err interrupt */
173 rc = cpt_lf_register_misc_irq(lf);
175 plt_err("Error registering IRQs");
177 rc = cpt_lf_register_done_irq(lf);
179 plt_err("Error registering IRQs");
185 cpt_lf_unregister_irqs(struct roc_cpt_lf *lf)
187 cpt_lf_unregister_misc_irq(lf);
188 cpt_lf_unregister_done_irq(lf);
192 roc_cpt_rxc_time_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_rxc_time_cfg *cfg)
194 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
195 struct cpt_rxc_time_cfg_req *req;
196 struct dev *dev = &cpt->dev;
198 req = mbox_alloc_msg_cpt_rxc_time_cfg(dev->mbox);
204 /* The step value is in microseconds. */
205 req->step = cfg->step;
207 /* The timeout will be: limit * step microseconds */
208 req->zombie_limit = cfg->zombie_limit;
209 req->zombie_thres = cfg->zombie_thres;
211 /* The timeout will be: limit * step microseconds */
212 req->active_limit = cfg->active_limit;
213 req->active_thres = cfg->active_thres;
215 return mbox_process(dev->mbox);
219 cpt_get_msix_offset(struct dev *dev, struct msix_offset_rsp **msix_rsp)
221 struct mbox *mbox = dev->mbox;
224 /* Get MSIX vector offsets */
225 mbox_alloc_msg_msix_offset(mbox);
226 rc = mbox_process_msg(mbox, (void *)msix_rsp);
232 cpt_lfs_attach(struct dev *dev, uint8_t blkaddr, bool modify, uint16_t nb_lf)
234 struct mbox *mbox = dev->mbox;
235 struct rsrc_attach_req *req;
237 if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
241 req = mbox_alloc_msg_attach_resources(mbox);
246 req->modify = modify;
247 req->cpt_blkaddr = blkaddr;
249 return mbox_process(mbox);
253 cpt_lfs_detach(struct dev *dev)
255 struct mbox *mbox = dev->mbox;
256 struct rsrc_detach_req *req;
258 req = mbox_alloc_msg_detach_resources(mbox);
265 return mbox_process(mbox);
269 cpt_available_lfs_get(struct dev *dev, uint16_t *nb_lf)
271 struct mbox *mbox = dev->mbox;
272 struct free_rsrcs_rsp *rsp;
275 mbox_alloc_msg_free_rsrc_cnt(mbox);
277 rc = mbox_process_msg(mbox, (void *)&rsp);
286 cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,
289 struct cpt_lf_alloc_req_msg *req;
290 struct mbox *mbox = dev->mbox;
292 if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
295 PLT_SET_USED(inl_dev_sso);
297 req = mbox_alloc_msg_cpt_lf_alloc(mbox);
298 req->nix_pf_func = 0;
299 req->sso_pf_func = idev_sso_pffunc_get();
300 req->eng_grpmsk = eng_grpmsk;
301 req->blkaddr = blkaddr;
303 return mbox_process(mbox);
307 cpt_lfs_free(struct dev *dev)
309 mbox_alloc_msg_cpt_lf_free(dev->mbox);
311 return mbox_process(dev->mbox);
315 cpt_hardware_caps_get(struct dev *dev, union cpt_eng_caps *hw_caps)
317 struct cpt_caps_rsp_msg *rsp;
320 mbox_alloc_msg_cpt_caps_get(dev->mbox);
322 ret = mbox_process_msg(dev->mbox, (void *)&rsp);
326 mbox_memcpy(hw_caps, rsp->eng_caps,
327 sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);
333 cpt_lf_iq_mem_calc(uint32_t nb_desc)
337 /* Space for instruction group memory */
338 len = CPT_IQ_GRP_SIZE(nb_desc);
341 len = PLT_ALIGN(len, ROC_ALIGN);
344 len += CPT_IQ_FC_LEN;
346 /* For instruction queues */
347 len += CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) * CPT_IQ_NB_DESC_MULTIPLIER *
348 sizeof(struct cpt_inst_s);
354 cpt_iq_init(struct roc_cpt_lf *lf)
356 union cpt_lf_q_size lf_q_size = {.u = 0x0};
357 union cpt_lf_q_base lf_q_base = {.u = 0x0};
358 union cpt_lf_inprog lf_inprog;
359 union cpt_lf_ctl lf_ctl;
362 lf->io_addr = lf->rbase + CPT_LF_NQX(0);
364 /* Disable command queue */
365 roc_cpt_iq_disable(lf);
367 /* Set command queue base address */
368 addr = (uintptr_t)lf->iq_vaddr +
369 PLT_ALIGN(CPT_IQ_GRP_SIZE(lf->nb_desc), ROC_ALIGN);
373 plt_write64(lf_q_base.u, lf->rbase + CPT_LF_Q_BASE);
375 /* Set command queue size */
376 lf_q_size.s.size_div40 = CPT_IQ_NB_DESC_SIZE_DIV40(lf->nb_desc);
377 plt_write64(lf_q_size.u, lf->rbase + CPT_LF_Q_SIZE);
379 /* Enable command queue execution */
380 lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
381 lf_inprog.s.eena = 1;
382 plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);
384 /* Enable instruction queue enqueuing */
385 lf_ctl.u = plt_read64(lf->rbase + CPT_LF_CTL);
388 lf_ctl.s.fc_up_crossing = 1;
389 lf_ctl.s.fc_hyst_bits = CPT_FC_NUM_HYST_BITS;
390 plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);
392 lf->fc_addr = (uint64_t *)addr;
396 roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf)
398 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
399 uint8_t blkaddr = RVU_BLOCK_ADDR_CPT0;
400 struct msix_offset_rsp *rsp;
404 /* Request LF resources */
405 rc = cpt_lfs_attach(&cpt->dev, blkaddr, false, nb_lf);
409 eng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |
410 (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) |
411 (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]);
413 rc = cpt_lfs_alloc(&cpt->dev, eng_grpmsk, blkaddr, false);
417 rc = cpt_get_msix_offset(&cpt->dev, &rsp);
421 for (i = 0; i < nb_lf; i++)
422 cpt->lf_msix_off[i] =
423 (cpt->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
424 rsp->cpt1_lf_msixoff[i] :
425 rsp->cptlf_msixoff[i];
427 roc_cpt->nb_lf = nb_lf;
432 cpt_lfs_free(&cpt->dev);
434 cpt_lfs_detach(&cpt->dev);
439 cpt_get_blkaddr(struct dev *dev)
444 /* Reading the discovery register to know which CPT is the LF
445 * attached to. Assume CPT LF's of only one block are attached
449 off = RVU_VF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
451 off = RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
453 reg = plt_read64(dev->bar2 + off);
455 return reg & 0x1FFULL ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;
459 cpt_lf_init(struct roc_cpt_lf *lf)
461 struct dev *dev = lf->dev;
466 if (lf->nb_desc == 0 || lf->nb_desc > CPT_LF_MAX_NB_DESC)
467 lf->nb_desc = CPT_LF_DEFAULT_NB_DESC;
469 /* Allocate memory for instruction queue for CPT LF. */
470 iq_mem = plt_zmalloc(cpt_lf_iq_mem_calc(lf->nb_desc), ROC_ALIGN);
474 blkaddr = cpt_get_blkaddr(dev);
475 lf->rbase = dev->bar2 + ((blkaddr << 20) | (lf->lf_id << 12));
476 lf->iq_vaddr = iq_mem;
477 lf->lmt_base = dev->lmt_base;
478 lf->pf_func = dev->pf_func;
480 /* Initialize instruction queue */
483 rc = cpt_lf_register_irqs(lf);
490 roc_cpt_iq_disable(lf);
496 roc_cpt_lf_init(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf)
498 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
502 lf->roc_cpt = roc_cpt;
503 lf->msixoff = cpt->lf_msix_off[lf->lf_id];
504 lf->pci_dev = cpt->pci_dev;
506 rc = cpt_lf_init(lf);
510 /* LF init successful */
511 roc_cpt->lf[lf->lf_id] = lf;
516 roc_cpt_dev_init(struct roc_cpt *roc_cpt)
518 struct plt_pci_device *pci_dev;
519 uint16_t nb_lf_avail;
524 if (roc_cpt == NULL || roc_cpt->pci_dev == NULL)
527 PLT_STATIC_ASSERT(sizeof(struct cpt) <= ROC_CPT_MEM_SZ);
529 cpt = roc_cpt_to_cpt_priv(roc_cpt);
530 memset(cpt, 0, sizeof(*cpt));
531 pci_dev = roc_cpt->pci_dev;
534 /* Initialize device */
535 rc = dev_init(dev, pci_dev);
537 plt_err("Failed to init roc device");
541 cpt->pci_dev = pci_dev;
542 roc_cpt->lmt_base = dev->lmt_base;
544 rc = cpt_hardware_caps_get(dev, roc_cpt->hw_caps);
546 plt_err("Could not determine hardware capabilities");
550 rc = cpt_available_lfs_get(&cpt->dev, &nb_lf_avail);
552 plt_err("Could not get available lfs");
556 /* Reserve 1 CPT LF for inline inbound */
557 nb_lf_avail = PLT_MIN(nb_lf_avail, ROC_CPT_MAX_LFS - 1);
559 roc_cpt->nb_lf_avail = nb_lf_avail;
561 dev->roc_cpt = roc_cpt;
563 /* Set it to idev if not already present */
564 if (!roc_idev_cpt_get())
565 roc_idev_cpt_set(roc_cpt);
574 cpt_lf_fini(struct roc_cpt_lf *lf)
576 /* Unregister IRQ's */
577 cpt_lf_unregister_irqs(lf);
580 roc_cpt_iq_disable(lf);
583 plt_free(lf->iq_vaddr);
588 roc_cpt_lf_fini(struct roc_cpt_lf *lf)
592 lf->roc_cpt->lf[lf->lf_id] = NULL;
597 roc_cpt_dev_fini(struct roc_cpt *roc_cpt)
599 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
604 /* Remove idev references */
605 if (roc_idev_cpt_get() == roc_cpt)
606 roc_idev_cpt_set(NULL);
608 roc_cpt->nb_lf_avail = 0;
610 roc_cpt->lmt_base = 0;
612 return dev_fini(&cpt->dev, cpt->pci_dev);
616 roc_cpt_dev_clear(struct roc_cpt *roc_cpt)
618 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
624 for (i = 0; i < roc_cpt->nb_lf; i++)
625 cpt->lf_msix_off[i] = 0;
629 cpt_lfs_free(&cpt->dev);
631 cpt_lfs_detach(&cpt->dev);
635 roc_cpt_eng_grp_add(struct roc_cpt *roc_cpt, enum cpt_eng_type eng_type)
637 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
638 struct dev *dev = &cpt->dev;
639 struct cpt_eng_grp_req *req;
640 struct cpt_eng_grp_rsp *rsp;
643 req = mbox_alloc_msg_cpt_eng_grp_get(dev->mbox);
648 case CPT_ENG_TYPE_AE:
649 case CPT_ENG_TYPE_SE:
650 case CPT_ENG_TYPE_IE:
656 req->eng_type = eng_type;
657 ret = mbox_process_msg(dev->mbox, (void *)&rsp);
661 if (rsp->eng_grp_num > 8) {
662 plt_err("Invalid CPT engine group");
666 roc_cpt->eng_grp[eng_type] = rsp->eng_grp_num;
668 return rsp->eng_grp_num;
672 roc_cpt_iq_disable(struct roc_cpt_lf *lf)
674 union cpt_lf_ctl lf_ctl = {.u = 0x0};
675 union cpt_lf_inprog lf_inprog;
678 /* Disable instructions enqueuing */
679 plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);
681 /* Wait for instruction queue to become empty */
683 lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
684 if (!lf_inprog.s.inflight)
689 plt_err("CPT LF %d is still busy", lf->lf_id);
695 /* Disable executions in the LF's queue.
696 * The queue should be empty at this point
698 lf_inprog.s.eena = 0x0;
699 plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);