1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Flow-control region length (bytes) reserved past the instruction queue. */
8 #define CPT_IQ_FC_LEN 128
/* Bytes of instruction-group memory per 40-descriptor chunk. */
9 #define CPT_IQ_GRP_LEN 16
/* CPT_LF_Q_SIZE counts queue size in units of 40 descriptors. */
11 #define CPT_IQ_NB_DESC_MULTIPLIER 40
13 /* The effective queue size to software is (CPT_LF_Q_SIZE[SIZE_DIV40] - 1 - 8).
15 * CPT requires 320 free entries (+8). And 40 entries are required for
16 * allowing CPT to discard packet when the queues are full (+1).
/* SIZE_DIV40 value for nb_desc: ceil(nb_desc/40) plus the +1 and +8
 * reserved chunks described above.
 */
18 #define CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) \
19 (PLT_DIV_CEIL(nb_desc, CPT_IQ_NB_DESC_MULTIPLIER) + 1 + 8)
/* Instruction-group memory size (bytes) for a queue of nb_desc entries. */
21 #define CPT_IQ_GRP_SIZE(nb_desc) \
22 (CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) * CPT_IQ_GRP_LEN)
/* Bounds/default for a CPT LF descriptor count (see cpt_lf_init clamping). */
24 #define CPT_LF_MAX_NB_DESC 128000
25 #define CPT_LF_DEFAULT_NB_DESC 1024
/* Enable (enb=true) or disable (enb=false) the CPT LF misc error interrupts
 * by writing the same bit mask to the W1S (write-1-to-set) or W1C
 * (write-1-to-clear) enable register respectively.
 */
28 cpt_lf_misc_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
30 /* Enable all cpt lf error irqs except RQ_DISABLED and CQ_DISABLED */
32 plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
34 lf->rbase + CPT_LF_MISC_INT_ENA_W1S);
/* Same mask written to W1C when disabling. */
36 plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
38 lf->rbase + CPT_LF_MISC_INT_ENA_W1C);
/* Misc (error) interrupt handler: reads the pending cause bits, logs them
 * with the PF/VF identity, then acknowledges by writing the value back to
 * CPT_LF_MISC_INT (W1C semantics assumed from the read-modify-ack pattern —
 * TODO confirm against the register spec).
 *
 * @param param  opaque IRQ cookie; the struct roc_cpt_lf registered for
 *               this vector.
 */
42 cpt_lf_misc_irq(void *param)
44 struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
45 struct dev *dev = lf->dev;
48 intr = plt_read64(lf->rbase + CPT_LF_MISC_INT);
52 plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
54 /* Dump lf registers */
/* Clear the interrupt by writing the pending bits back. */
58 plt_write64(intr, lf->rbase + CPT_LF_MISC_INT);
62 cpt_lf_register_misc_irq(struct roc_cpt_lf *lf)
64 struct plt_pci_device *pci_dev = lf->pci_dev;
65 struct plt_intr_handle *handle;
68 handle = &pci_dev->intr_handle;
70 vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
71 /* Clear err interrupt */
72 cpt_lf_misc_intr_enb_dis(lf, false);
73 /* Set used interrupt vectors */
74 rc = dev_irq_register(handle, cpt_lf_misc_irq, lf, vec);
75 /* Enable all dev interrupt except for RQ_DISABLED */
76 cpt_lf_misc_intr_enb_dis(lf, true);
82 cpt_lf_unregister_misc_irq(struct roc_cpt_lf *lf)
84 struct plt_pci_device *pci_dev = lf->pci_dev;
85 struct plt_intr_handle *handle;
88 handle = &pci_dev->intr_handle;
90 vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
91 /* Clear err interrupt */
92 cpt_lf_misc_intr_enb_dis(lf, false);
93 dev_irq_unregister(handle, cpt_lf_misc_irq, lf, vec);
/* Enable/disable the DONE interrupt via its W1S/W1C enable registers.
 * Only bit 0 is used for the done interrupt.
 */
97 cpt_lf_done_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
100 plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1S);
102 plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1C);
/* DONE interrupt handler: reads the count of completed instructions,
 * acknowledges them via CPT_LF_DONE_ACK, and rearms the done-wait timer
 * by writing back CPT_LF_DONE_WAIT.
 *
 * @param param  opaque IRQ cookie; the struct roc_cpt_lf for this vector.
 */
106 cpt_lf_done_irq(void *param)
108 struct roc_cpt_lf *lf = param;
112 /* Read the number of completed requests */
113 intr = plt_read64(lf->rbase + CPT_LF_DONE);
117 done_wait = plt_read64(lf->rbase + CPT_LF_DONE_WAIT);
119 /* Acknowledge the number of completed requests */
120 plt_write64(intr, lf->rbase + CPT_LF_DONE_ACK);
/* Restore the done-wait threshold read earlier. */
122 plt_write64(done_wait, lf->rbase + CPT_LF_DONE_WAIT);
126 cpt_lf_register_done_irq(struct roc_cpt_lf *lf)
128 struct plt_pci_device *pci_dev = lf->pci_dev;
129 struct plt_intr_handle *handle;
132 handle = &pci_dev->intr_handle;
134 vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
136 /* Clear done interrupt */
137 cpt_lf_done_intr_enb_dis(lf, false);
139 /* Set used interrupt vectors */
140 rc = dev_irq_register(handle, cpt_lf_done_irq, lf, vec);
142 /* Enable done interrupt */
143 cpt_lf_done_intr_enb_dis(lf, true);
149 cpt_lf_unregister_done_irq(struct roc_cpt_lf *lf)
151 struct plt_pci_device *pci_dev = lf->pci_dev;
152 struct plt_intr_handle *handle;
155 handle = &pci_dev->intr_handle;
157 vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
159 /* Clear done interrupt */
160 cpt_lf_done_intr_enb_dis(lf, false);
161 dev_irq_unregister(handle, cpt_lf_done_irq, lf, vec);
/* Register both the misc (error) and done IRQs for an LF after validating
 * that a valid MSI-X offset was assigned. Logs and bails out on the first
 * failure (error paths partially elided in this view).
 */
165 cpt_lf_register_irqs(struct roc_cpt_lf *lf)
/* An LF without a valid MSI-X offset cannot take interrupts. */
169 if (lf->msixoff == MSIX_VECTOR_INVALID) {
170 plt_err("Invalid CPTLF MSIX vector offset vector: 0x%x",
175 /* Register lf err interrupt */
176 rc = cpt_lf_register_misc_irq(lf);
178 plt_err("Error registering IRQs");
180 rc = cpt_lf_register_done_irq(lf);
182 plt_err("Error registering IRQs");
/* Unregister both LF interrupt handlers (misc then done). */
188 cpt_lf_unregister_irqs(struct roc_cpt_lf *lf)
190 cpt_lf_unregister_misc_irq(lf);
191 cpt_lf_unregister_done_irq(lf);
/* Debug helper: dump the software-side LF state followed by a snapshot of
 * the key LF hardware registers. All output goes through plt_cpt_dbg, so
 * it is a no-op unless CPT debug logging is enabled.
 */
195 cpt_lf_dump(struct roc_cpt_lf *lf)
197 plt_cpt_dbg("CPT LF");
198 plt_cpt_dbg("RBASE: 0x%016" PRIx64, lf->rbase);
199 plt_cpt_dbg("LMT_BASE: 0x%016" PRIx64, lf->lmt_base);
200 plt_cpt_dbg("MSIXOFF: 0x%x", lf->msixoff);
201 plt_cpt_dbg("LF_ID: 0x%x", lf->lf_id);
202 plt_cpt_dbg("NB DESC: %d", lf->nb_desc);
203 plt_cpt_dbg("FC_ADDR: 0x%016" PRIx64, (uintptr_t)lf->fc_addr);
204 plt_cpt_dbg("CQ.VADDR: 0x%016" PRIx64, (uintptr_t)lf->iq_vaddr);
/* Hardware register snapshot (read live from rbase). */
206 plt_cpt_dbg("CPT LF REG:");
207 plt_cpt_dbg("LF_CTL[0x%016llx]: 0x%016" PRIx64, CPT_LF_CTL,
208 plt_read64(lf->rbase + CPT_LF_CTL));
209 plt_cpt_dbg("LF_INPROG[0x%016llx]: 0x%016" PRIx64, CPT_LF_INPROG,
210 plt_read64(lf->rbase + CPT_LF_INPROG));
212 plt_cpt_dbg("Q_BASE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_BASE,
213 plt_read64(lf->rbase + CPT_LF_Q_BASE));
214 plt_cpt_dbg("Q_SIZE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_SIZE,
215 plt_read64(lf->rbase + CPT_LF_Q_SIZE));
216 plt_cpt_dbg("Q_INST_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_INST_PTR,
217 plt_read64(lf->rbase + CPT_LF_Q_INST_PTR));
218 plt_cpt_dbg("Q_GRP_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_GRP_PTR,
219 plt_read64(lf->rbase + CPT_LF_Q_GRP_PTR));
/* Send a CPT_INLINE_IPSEC_CFG mailbox message configuring an LF for
 * inline outbound IPsec, binding it to the given SSO and NIX PF functions.
 *
 * @param dev          ROC device owning the mailbox.
 * @param sso_pf_func  SSO PF/func to notify on completion (0 to unbind).
 * @param nix_pf_func  NIX PF/func the LF serves (0 to unbind).
 * @param lf_id        target LF index.
 * @param ena          enable/disable flag (consumed by elided lines —
 *                     presumably sets req->enable; confirm).
 * Returns the mbox_process() result.
 */
223 cpt_lf_outb_cfg(struct dev *dev, uint16_t sso_pf_func, uint16_t nix_pf_func,
224 uint8_t lf_id, bool ena)
226 struct cpt_inline_ipsec_cfg_msg *req;
227 struct mbox *mbox = dev->mbox;
229 req = mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
233 req->dir = CPT_INLINE_OUTBOUND;
237 req->sso_pf_func = sso_pf_func;
238 req->nix_pf_func = nix_pf_func;
243 return mbox_process(mbox);
247 roc_cpt_inline_ipsec_cfg(struct dev *cpt_dev, uint8_t lf_id,
248 struct roc_nix *roc_nix)
250 bool ena = roc_nix ? true : false;
251 uint16_t nix_pf_func = 0;
252 uint16_t sso_pf_func = 0;
255 nix_pf_func = roc_nix_get_pf_func(roc_nix);
256 sso_pf_func = idev_sso_pffunc_get();
259 return cpt_lf_outb_cfg(cpt_dev, sso_pf_func, nix_pf_func, lf_id, ena);
/* Configure the CPT RX inline (inbound IPsec) LF via mailbox, passing the
 * idev SSO PF/func plus two opaque hardware parameters. param1/param2
 * semantics are defined by the kernel AF driver's
 * cpt_rx_inline_lf_cfg_msg — confirm against that definition.
 * Returns the mbox_process() result.
 */
263 roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
266 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
267 struct cpt_rx_inline_lf_cfg_msg *req;
270 mbox = cpt->dev.mbox;
272 req = mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
276 req->sso_pf_func = idev_sso_pffunc_get();
277 req->param1 = param1;
278 req->param2 = param2;
280 return mbox_process(mbox);
/* Program the RXC (reassembly context) aging parameters via mailbox:
 * the time step and the zombie/active limits and thresholds taken from
 * the caller-supplied cfg. Returns the mbox_process() result.
 */
284 roc_cpt_rxc_time_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_rxc_time_cfg *cfg)
286 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
287 struct cpt_rxc_time_cfg_req *req;
288 struct dev *dev = &cpt->dev;
290 req = mbox_alloc_msg_cpt_rxc_time_cfg(dev->mbox);
296 /* The step value is in microseconds. */
297 req->step = cfg->step;
299 /* The timeout will be: limit * step microseconds */
300 req->zombie_limit = cfg->zombie_limit;
301 req->zombie_thres = cfg->zombie_thres;
303 /* The timeout will be: limit * step microseconds */
304 req->active_limit = cfg->active_limit;
305 req->active_thres = cfg->active_thres;
307 return mbox_process(dev->mbox);
/* Query the AF for this device's MSI-X vector offsets. On success
 * *msix_rsp points at the mailbox response (owned by the mbox layer —
 * valid only until the next mbox operation; do not free).
 */
311 cpt_get_msix_offset(struct dev *dev, struct msix_offset_rsp **msix_rsp)
313 struct mbox *mbox = dev->mbox;
316 /* Get MSIX vector offsets */
317 mbox_alloc_msg_msix_offset(mbox);
318 rc = mbox_process_msg(mbox, (void *)msix_rsp);
324 cpt_lfs_attach(struct dev *dev, uint8_t blkaddr, bool modify, uint16_t nb_lf)
326 struct mbox *mbox = dev->mbox;
327 struct rsrc_attach_req *req;
329 if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
333 req = mbox_alloc_msg_attach_resources(mbox);
338 req->modify = modify;
339 req->cpt_blkaddr = blkaddr;
341 return mbox_process(mbox);
345 cpt_lfs_detach(struct dev *dev)
347 struct mbox *mbox = dev->mbox;
348 struct rsrc_detach_req *req;
350 req = mbox_alloc_msg_detach_resources(mbox);
357 return mbox_process(mbox);
361 cpt_available_lfs_get(struct dev *dev, uint16_t *nb_lf)
363 struct mbox *mbox = dev->mbox;
364 struct free_rsrcs_rsp *rsp;
367 mbox_alloc_msg_free_rsrc_cnt(mbox);
369 rc = mbox_process_msg(mbox, (void *)&rsp);
378 cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,
381 struct cpt_lf_alloc_req_msg *req;
382 struct mbox *mbox = dev->mbox;
384 if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
387 req = mbox_alloc_msg_cpt_lf_alloc(mbox);
388 req->nix_pf_func = 0;
389 if (inl_dev_sso && nix_inl_dev_pffunc_get())
390 req->sso_pf_func = nix_inl_dev_pffunc_get();
392 req->sso_pf_func = idev_sso_pffunc_get();
393 req->eng_grpmsk = eng_grpmsk;
394 req->blkaddr = blkaddr;
396 return mbox_process(mbox);
/* Free all allocated CPT LFs via the CPT_LF_FREE mailbox message. */
400 cpt_lfs_free(struct dev *dev)
402 mbox_alloc_msg_cpt_lf_free(dev->mbox);
404 return mbox_process(dev->mbox);
/* Fetch the per-engine-type capability words from the AF and copy them
 * into the caller's hw_caps array (CPT_MAX_ENG_TYPES entries).
 */
408 cpt_hardware_caps_get(struct dev *dev, union cpt_eng_caps *hw_caps)
410 struct cpt_caps_rsp_msg *rsp;
413 mbox_alloc_msg_cpt_caps_get(dev->mbox);
415 ret = mbox_process_msg(dev->mbox, (void *)&rsp);
419 mbox_memcpy(hw_caps, rsp->eng_caps,
420 sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);
/* Compute the total bytes to allocate for an LF's instruction queue:
 * instruction-group memory (aligned to ROC_ALIGN), the flow-control
 * region, and the instruction entries themselves (SIZE_DIV40 chunks of
 * 40 cpt_inst_s each).
 */
426 cpt_lf_iq_mem_calc(uint32_t nb_desc)
430 /* Space for instruction group memory */
431 len = CPT_IQ_GRP_SIZE(nb_desc);
434 len = PLT_ALIGN(len, ROC_ALIGN);
/* Flow-control region follows the group memory (see cpt_iq_init). */
437 len += CPT_IQ_FC_LEN;
439 /* For instruction queues */
440 len += PLT_ALIGN(CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) *
441 CPT_IQ_NB_DESC_MULTIPLIER *
442 sizeof(struct cpt_inst_s),
449 cpt_iq_init(struct roc_cpt_lf *lf)
451 union cpt_lf_q_size lf_q_size = {.u = 0x0};
452 union cpt_lf_q_base lf_q_base = {.u = 0x0};
455 lf->io_addr = lf->rbase + CPT_LF_NQX(0);
457 /* Disable command queue */
458 roc_cpt_iq_disable(lf);
460 /* Set command queue base address */
461 addr = (uintptr_t)lf->iq_vaddr +
462 PLT_ALIGN(CPT_IQ_GRP_SIZE(lf->nb_desc), ROC_ALIGN);
466 plt_write64(lf_q_base.u, lf->rbase + CPT_LF_Q_BASE);
468 /* Set command queue size */
469 lf_q_size.s.size_div40 = CPT_IQ_NB_DESC_SIZE_DIV40(lf->nb_desc);
470 plt_write64(lf_q_size.u, lf->rbase + CPT_LF_Q_SIZE);
472 lf->fc_addr = (uint64_t *)addr;
473 lf->fc_hyst_bits = plt_log2_u32(lf->nb_desc) / 2;
474 lf->fc_thresh = lf->nb_desc - (lf->nb_desc % (1 << lf->fc_hyst_bits));
/* Configure nb_lf CPT LFs for the device: attach LF resources, allocate
 * them with the AE/SE/IE engine-group mask, fetch per-LF MSI-X offsets,
 * and record nb_lf. On failure unwinds by freeing and detaching LFs
 * (labels elided in this view).
 */
478 roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf)
480 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
481 uint8_t blkaddr = RVU_BLOCK_ADDR_CPT0;
482 struct msix_offset_rsp *rsp;
486 /* Request LF resources */
487 rc = cpt_lfs_attach(&cpt->dev, blkaddr, true, nb_lf);
/* Allow LFs to use all three configured engine groups. */
491 eng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |
492 (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) |
493 (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]);
495 rc = cpt_lfs_alloc(&cpt->dev, eng_grpmsk, blkaddr, false);
499 rc = cpt_get_msix_offset(&cpt->dev, &rsp);
/* Pick the MSI-X offset array matching each LF's block (CPT0/CPT1). */
503 for (i = 0; i < nb_lf; i++)
504 cpt->lf_msix_off[i] =
505 (cpt->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
506 rsp->cpt1_lf_msixoff[i] :
507 rsp->cptlf_msixoff[i];
509 roc_cpt->nb_lf = nb_lf;
/* Error unwind: release LFs, then detach the resources. */
514 cpt_lfs_free(&cpt->dev);
516 cpt_lfs_detach(&cpt->dev);
/* Determine which CPT block (CPT0 or CPT1) this device's LFs belong to
 * by reading the RVU block-discovery register for CPT1: a nonzero LF
 * count (low 9 bits) means CPT1, otherwise CPT0. The PF vs VF register
 * offset selection is elided in this view.
 */
521 cpt_get_blkaddr(struct dev *dev)
526 /* Reading the discovery register to know which CPT is the LF
527 * attached to. Assume CPT LF's of only one block are attached
531 off = RVU_VF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
533 off = RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
535 reg = plt_read64(dev->bar2 + off);
/* Low 9 bits hold the number of LFs discovered on CPT1. */
537 return reg & 0x1FFULL ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;
541 cpt_lf_init(struct roc_cpt_lf *lf)
543 struct dev *dev = lf->dev;
548 if (lf->nb_desc == 0 || lf->nb_desc > CPT_LF_MAX_NB_DESC)
549 lf->nb_desc = CPT_LF_DEFAULT_NB_DESC;
551 /* Allocate memory for instruction queue for CPT LF. */
552 iq_mem = plt_zmalloc(cpt_lf_iq_mem_calc(lf->nb_desc), ROC_ALIGN);
555 plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
557 blkaddr = cpt_get_blkaddr(dev);
558 lf->rbase = dev->bar2 + ((blkaddr << 20) | (lf->lf_id << 12));
559 lf->iq_vaddr = iq_mem;
560 lf->lmt_base = dev->lmt_base;
561 lf->pf_func = dev->pf_func;
563 /* Initialize instruction queue */
566 rc = cpt_lf_register_irqs(lf);
573 roc_cpt_iq_disable(lf);
579 roc_cpt_lf_init(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf)
581 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
585 lf->roc_cpt = roc_cpt;
586 lf->msixoff = cpt->lf_msix_off[lf->lf_id];
587 lf->pci_dev = cpt->pci_dev;
589 rc = cpt_lf_init(lf);
593 /* LF init successful */
594 roc_cpt->lf[lf->lf_id] = lf;
599 roc_cpt_dev_init(struct roc_cpt *roc_cpt)
601 struct plt_pci_device *pci_dev;
602 uint16_t nb_lf_avail;
607 if (roc_cpt == NULL || roc_cpt->pci_dev == NULL)
610 PLT_STATIC_ASSERT(sizeof(struct cpt) <= ROC_CPT_MEM_SZ);
612 cpt = roc_cpt_to_cpt_priv(roc_cpt);
613 memset(cpt, 0, sizeof(*cpt));
614 pci_dev = roc_cpt->pci_dev;
617 /* Initialize device */
618 rc = dev_init(dev, pci_dev);
620 plt_err("Failed to init roc device");
624 cpt->pci_dev = pci_dev;
625 roc_cpt->lmt_base = dev->lmt_base;
627 rc = cpt_hardware_caps_get(dev, roc_cpt->hw_caps);
629 plt_err("Could not determine hardware capabilities");
633 rc = cpt_available_lfs_get(&cpt->dev, &nb_lf_avail);
635 plt_err("Could not get available lfs");
639 /* Reserve 1 CPT LF for inline inbound */
640 nb_lf_avail = PLT_MIN(nb_lf_avail, (uint16_t)(ROC_CPT_MAX_LFS - 1));
642 roc_cpt->nb_lf_avail = nb_lf_avail;
644 dev->roc_cpt = roc_cpt;
646 /* Set it to idev if not already present */
647 if (!roc_idev_cpt_get())
648 roc_idev_cpt_set(roc_cpt);
/* Flush a security context (cptr) from the CPT context cache by writing
 * the CPT_LF_CTX_FLUSH register, tagged with this LF's PF/func.
 * (Register field setup for cptr is elided in this view.)
 */
657 roc_cpt_lf_ctx_flush(struct roc_cpt_lf *lf, uint64_t cptr)
659 union cpt_lf_ctx_flush reg;
665 reg.s.pf_func = lf->pf_func;
669 plt_write64(reg.u, lf->rbase + CPT_LF_CTX_FLUSH);
675 cpt_lf_fini(struct roc_cpt_lf *lf)
677 /* Unregister IRQ's */
678 cpt_lf_unregister_irqs(lf);
681 roc_cpt_iq_disable(lf);
684 plt_free(lf->iq_vaddr);
/* Public LF teardown: drop the LF from its parent's lf[] table
 * (remaining cleanup elided in this view — presumably calls
 * cpt_lf_fini(); confirm).
 */
689 roc_cpt_lf_fini(struct roc_cpt_lf *lf)
693 lf->roc_cpt->lf[lf->lf_id] = NULL;
698 roc_cpt_dev_fini(struct roc_cpt *roc_cpt)
700 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
705 /* Remove idev references */
706 if (roc_idev_cpt_get() == roc_cpt)
707 roc_idev_cpt_set(NULL);
709 roc_cpt->nb_lf_avail = 0;
711 roc_cpt->lmt_base = 0;
713 return dev_fini(&cpt->dev, cpt->pci_dev);
717 roc_cpt_dev_clear(struct roc_cpt *roc_cpt)
719 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
725 for (i = 0; i < roc_cpt->nb_lf; i++)
726 cpt->lf_msix_off[i] = 0;
730 cpt_lfs_free(&cpt->dev);
732 cpt_lfs_detach(&cpt->dev);
736 roc_cpt_eng_grp_add(struct roc_cpt *roc_cpt, enum cpt_eng_type eng_type)
738 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
739 struct dev *dev = &cpt->dev;
740 struct cpt_eng_grp_req *req;
741 struct cpt_eng_grp_rsp *rsp;
744 req = mbox_alloc_msg_cpt_eng_grp_get(dev->mbox);
749 case CPT_ENG_TYPE_AE:
750 case CPT_ENG_TYPE_SE:
751 case CPT_ENG_TYPE_IE:
757 req->eng_type = eng_type;
758 ret = mbox_process_msg(dev->mbox, (void *)&rsp);
762 if (rsp->eng_grp_num > 8) {
763 plt_err("Invalid CPT engine group");
767 roc_cpt->eng_grp[eng_type] = rsp->eng_grp_num;
769 return rsp->eng_grp_num;
773 roc_cpt_iq_disable(struct roc_cpt_lf *lf)
775 union cpt_lf_ctl lf_ctl = {.u = 0x0};
776 union cpt_lf_q_grp_ptr grp_ptr;
777 union cpt_lf_inprog lf_inprog;
781 /* Disable instructions enqueuing */
782 plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);
784 /* Wait for instruction queue to become empty */
786 lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
787 if (!lf_inprog.s.inflight)
792 plt_err("CPT LF %d is still busy", lf->lf_id);
798 /* Disable executions in the LF's queue.
799 * The queue should be empty at this point
801 lf_inprog.s.eena = 0x0;
802 plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);
804 /* Wait for instruction queue to become empty */
807 lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
808 if (lf_inprog.s.grb_partial)
812 grp_ptr.u = plt_read64(lf->rbase + CPT_LF_Q_GRP_PTR);
813 } while ((cnt < 10) && (grp_ptr.s.nq_ptr != grp_ptr.s.dq_ptr));
817 lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
818 if ((lf_inprog.s.inflight == 0) && (lf_inprog.s.gwb_cnt < 40) &&
819 ((lf_inprog.s.grb_cnt == 0) || (lf_inprog.s.grb_cnt == 40)))
827 roc_cpt_iq_enable(struct roc_cpt_lf *lf)
829 union cpt_lf_inprog lf_inprog;
830 union cpt_lf_ctl lf_ctl;
832 /* Disable command queue */
833 roc_cpt_iq_disable(lf);
835 /* Enable instruction queue enqueuing */
836 lf_ctl.u = plt_read64(lf->rbase + CPT_LF_CTL);
839 lf_ctl.s.fc_up_crossing = 0;
840 lf_ctl.s.fc_hyst_bits = lf->fc_hyst_bits;
841 plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);
843 /* Enable command queue execution */
844 lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
845 lf_inprog.s.eena = 1;
846 plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);
852 roc_cpt_lmtline_init(struct roc_cpt *roc_cpt, struct roc_cpt_lmtline *lmtline,
855 struct roc_cpt_lf *lf;
857 lf = roc_cpt->lf[lf_id];
861 lmtline->io_addr = lf->io_addr;
862 if (roc_model_is_cn10k())
863 lmtline->io_addr |= ROC_CN10K_CPT_INST_DW_M1 << 4;
865 lmtline->fc_addr = lf->fc_addr;
866 lmtline->lmt_base = lf->lmt_base;