1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/*
 * Configure RXC (reassembly context) timeout parameters on the CPT block
 * by sending a CPT_RXC_TIME_CFG mailbox request to the admin function.
 *
 * NOTE(review): this chunk is elided -- the return-type line, braces, the
 * NULL check on the mbox allocation and any early-error returns are not
 * visible here; confirm against the full source.
 */
9 roc_cpt_rxc_time_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_rxc_time_cfg *cfg)
11 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
12 struct cpt_rxc_time_cfg_req *req;
13 struct dev *dev = &cpt->dev;
/* Allocate the mailbox request message for RXC time configuration. */
15 req = mbox_alloc_msg_cpt_rxc_time_cfg(dev->mbox);
21 /* The step value is in microseconds. */
22 req->step = cfg->step;
24 /* The timeout will be: limit * step microseconds */
25 req->zombie_limit = cfg->zombie_limit;
26 req->zombie_thres = cfg->zombie_thres;
28 /* The timeout will be: limit * step microseconds */
29 req->active_limit = cfg->active_limit;
30 req->active_thres = cfg->active_thres;
/* Send the request and wait for the AF response synchronously. */
32 return mbox_process(dev->mbox);
/*
 * Query the AF for the MSIX vector offsets of the attached LFs.
 * On success *msix_rsp points at the response message owned by the
 * mailbox layer (caller must not free it).
 *
 * NOTE(review): elided chunk -- declaration of `rc`, braces and the
 * final return statement are not visible here.
 */
36 cpt_get_msix_offset(struct dev *dev, struct msix_offset_rsp **msix_rsp)
38 struct mbox *mbox = dev->mbox;
41 /* Get MSIX vector offsets */
42 mbox_alloc_msg_msix_offset(mbox);
/* Blocks until the AF responds; response is handed back via msix_rsp. */
43 rc = mbox_process_msg(mbox, (void *)msix_rsp);
/*
 * Attach `nb_lf` CPT LF resources on the given block (CPT0 or CPT1) via
 * an ATTACH_RESOURCES mailbox request. `modify` presumably selects
 * modify-existing-attachment semantics -- the line setting it on the
 * request is elided here; TODO confirm against the full source.
 */
49 cpt_lfs_attach(struct dev *dev, uint8_t blkaddr, bool modify, uint16_t nb_lf)
51 struct mbox *mbox = dev->mbox;
52 struct rsrc_attach_req *req;
/* Only the two CPT block addresses are valid targets. */
54 if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
58 req = mbox_alloc_msg_attach_resources(mbox);
/* NOTE(review): lines filling cptlfs/modify on req appear elided. */
64 req->cpt_blkaddr = blkaddr;
66 return mbox_process(mbox);
/*
 * Detach the CPT LF resources previously attached to this dev by sending
 * a DETACH_RESOURCES mailbox request.
 *
 * NOTE(review): the lines setting the request's partial/cptlfs fields are
 * elided from this view -- confirm against the full source.
 */
70 cpt_lfs_detach(struct dev *dev)
72 struct mbox *mbox = dev->mbox;
73 struct rsrc_detach_req *req;
75 req = mbox_alloc_msg_detach_resources(mbox);
82 return mbox_process(mbox);
/*
 * Ask the AF how many CPT LFs are free for allocation and return the
 * count through *nb_lf.
 *
 * NOTE(review): elided chunk -- the declaration of `rc`, the extraction
 * of the LF count from `rsp` into *nb_lf, and the return are not visible.
 */
86 cpt_available_lfs_get(struct dev *dev, uint16_t *nb_lf)
88 struct mbox *mbox = dev->mbox;
89 struct free_rsrcs_rsp *rsp;
/* FREE_RSRC_CNT request carries no payload; just allocate and send. */
92 mbox_alloc_msg_free_rsrc_cnt(mbox);
94 rc = mbox_process_msg(mbox, (void *)&rsp);
/*
 * Allocate the attached CPT LFs via a CPT_LF_ALLOC mailbox request,
 * binding them to the requested engine-group mask and block address.
 * The SSO PF function is taken from the idev; NIX binding is left unset.
 *
 * NOTE(review): the parameter list is split across elided lines -- a
 * trailing bool parameter (referenced below as `inl_dev_sso`) is only
 * visible through PLT_SET_USED; confirm the full signature.
 */
103 cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,
106 struct cpt_lf_alloc_req_msg *req;
107 struct mbox *mbox = dev->mbox;
/* Only CPT0/CPT1 are legal block addresses. */
109 if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
/* Parameter currently unused in this path; silence the warning. */
112 PLT_SET_USED(inl_dev_sso);
114 req = mbox_alloc_msg_cpt_lf_alloc(mbox);
/* No NIX PF function bound at allocation time. */
115 req->nix_pf_func = 0;
116 req->sso_pf_func = idev_sso_pffunc_get();
117 req->eng_grpmsk = eng_grpmsk;
118 req->blkaddr = blkaddr;
120 return mbox_process(mbox);
/*
 * Free all CPT LFs previously allocated for this dev (CPT_LF_FREE
 * mailbox request with no payload).
 */
124 cpt_lfs_free(struct dev *dev)
126 mbox_alloc_msg_cpt_lf_free(dev->mbox);
128 return mbox_process(dev->mbox);
/*
 * Fetch the per-engine-type hardware capability words from the AF and
 * copy them into the caller-provided hw_caps array
 * (CPT_MAX_ENG_TYPES entries).
 *
 * NOTE(review): elided chunk -- the declaration of `ret`, the error
 * check after mbox_process_msg() and the final return are not visible.
 */
132 cpt_hardware_caps_get(struct dev *dev, union cpt_eng_caps *hw_caps)
134 struct cpt_caps_rsp_msg *rsp;
137 mbox_alloc_msg_cpt_caps_get(dev->mbox);
139 ret = mbox_process_msg(dev->mbox, (void *)&rsp);
/* Copy engine capabilities out of the mbox-owned response. */
143 mbox_memcpy(hw_caps, rsp->eng_caps,
144 sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);
/*
 * Configure the CPT device with `nb_lf` logical functions:
 *   1. attach LF resources on CPT0,
 *   2. allocate them bound to the AE/SE/IE engine groups discovered
 *      earlier (stored in roc_cpt->eng_grp[]),
 *   3. record each LF's MSIX vector offset, picking the CPT1 offset
 *      array when that LF's block address is CPT1.
 *
 * NOTE(review): the error checks between steps and the failure labels
 * that the cleanup calls at the bottom belong to are elided -- the
 * cpt_lfs_free()/cpt_lfs_detach() lines below are presumably the
 * unwind path, not the success path; confirm against the full source.
 */
150 roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf)
152 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
153 uint8_t blkaddr = RVU_BLOCK_ADDR_CPT0;
154 struct msix_offset_rsp *rsp;
158 /* Request LF resources */
159 rc = cpt_lfs_attach(&cpt->dev, blkaddr, false, nb_lf);
/* Build the engine-group mask from the groups found at init time. */
163 eng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |
164 (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) |
165 (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]);
167 rc = cpt_lfs_alloc(&cpt->dev, eng_grpmsk, blkaddr, false);
171 rc = cpt_get_msix_offset(&cpt->dev, &rsp);
/* Per-LF MSIX offset: CPT1-attached LFs use the cpt1 offset table. */
175 for (i = 0; i < nb_lf; i++)
176 cpt->lf_msix_off[i] =
177 (cpt->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
178 rsp->cpt1_lf_msixoff[i] :
179 rsp->cptlf_msixoff[i];
181 roc_cpt->nb_lf = nb_lf;
/* Failure unwind (labels elided): free then detach the LFs. */
186 cpt_lfs_free(&cpt->dev);
188 cpt_lfs_detach(&cpt->dev);
/*
 * Determine which CPT block (CPT0 or CPT1) the LFs of this device are
 * attached to by reading the RVU block-address discovery register for
 * CPT1: a non-zero low 9 bits means CPT1 LFs exist, otherwise CPT0.
 *
 * NOTE(review): the VF-vs-PF branch condition choosing between the two
 * `off` assignments is elided -- presumably keyed on dev_is_vf() or
 * similar; confirm against the full source.
 */
193 cpt_get_blkaddr(struct dev *dev)
198 /* Reading the discovery register to know which CPT is the LF
199 * attached to. Assume CPT LF's of only one block are attached
/* VF view of the discovery register. */
203 off = RVU_VF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
/* PF view of the discovery register. */
205 off = RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
207 reg = plt_read64(dev->bar2 + off);
/* Low 9 bits hold the number of LFs implemented on the block. */
209 return reg & 0x1FFULL ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;
/*
 * One-time initialization of a CPT device:
 *   - validate arguments and the size of the private struct,
 *   - initialize the underlying roc device (BARs, mailbox, IRQs),
 *   - cache the LMT base and hardware capabilities,
 *   - query available LFs, reserving one for inline inbound,
 *   - publish this roc_cpt to the idev singleton if none is set yet.
 *
 * NOTE(review): error-path cleanup (likely dev_fini on failure) and the
 * success return are elided from this view.
 */
213 roc_cpt_dev_init(struct roc_cpt *roc_cpt)
215 struct plt_pci_device *pci_dev;
216 uint16_t nb_lf_avail;
221 if (roc_cpt == NULL || roc_cpt->pci_dev == NULL)
/* Private struct must fit in the opaque reserved area. */
224 PLT_STATIC_ASSERT(sizeof(struct cpt) <= ROC_CPT_MEM_SZ);
226 cpt = roc_cpt_to_cpt_priv(roc_cpt);
227 memset(cpt, 0, sizeof(*cpt));
228 pci_dev = roc_cpt->pci_dev;
231 /* Initialize device */
232 rc = dev_init(dev, pci_dev);
234 plt_err("Failed to init roc device");
238 cpt->pci_dev = pci_dev;
239 roc_cpt->lmt_base = dev->lmt_base;
241 rc = cpt_hardware_caps_get(dev, roc_cpt->hw_caps);
243 plt_err("Could not determine hardware capabilities");
247 rc = cpt_available_lfs_get(&cpt->dev, &nb_lf_avail);
249 plt_err("Could not get available lfs");
253 /* Reserve 1 CPT LF for inline inbound */
254 nb_lf_avail = PLT_MIN(nb_lf_avail, ROC_CPT_MAX_LFS - 1);
256 roc_cpt->nb_lf_avail = nb_lf_avail;
258 dev->roc_cpt = roc_cpt;
260 /* Set it to idev if not already present */
261 if (!roc_idev_cpt_get())
262 roc_idev_cpt_set(roc_cpt);
/*
 * Tear down a CPT device: drop the idev reference if this device owns
 * it, clear the cached LF availability and LMT base, then finalize the
 * underlying roc device.
 *
 * NOTE(review): a NULL check on `cpt` between these lines may be elided.
 */
271 roc_cpt_dev_fini(struct roc_cpt *roc_cpt)
273 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
278 /* Remove idev references */
279 if (roc_idev_cpt_get() == roc_cpt)
280 roc_idev_cpt_set(NULL);
282 roc_cpt->nb_lf_avail = 0;
284 roc_cpt->lmt_base = 0;
286 return dev_fini(&cpt->dev, cpt->pci_dev);
/*
 * Undo roc_cpt_dev_configure(): clear the recorded per-LF MSIX offsets,
 * then free and detach the CPT LF resources.
 *
 * NOTE(review): per-LF teardown calls (e.g. fini of each configured LF)
 * between the loop and the free/detach appear elided from this view.
 */
290 roc_cpt_dev_clear(struct roc_cpt *roc_cpt)
292 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
298 for (i = 0; i < roc_cpt->nb_lf; i++)
299 cpt->lf_msix_off[i] = 0;
303 cpt_lfs_free(&cpt->dev);
305 cpt_lfs_detach(&cpt->dev);
/*
 * Query the AF for the engine-group number assigned to the given engine
 * type (AE/SE/IE), cache it in roc_cpt->eng_grp[] and return it.
 * Group numbers above 8 are rejected as invalid.
 *
 * NOTE(review): the switch's default branch, the error checks after the
 * mbox call and the function tail are elided / outside this view --
 * confirm against the full source.
 */
309 roc_cpt_eng_grp_add(struct roc_cpt *roc_cpt, enum cpt_eng_type eng_type)
311 struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
312 struct dev *dev = &cpt->dev;
313 struct cpt_eng_grp_req *req;
314 struct cpt_eng_grp_rsp *rsp;
317 req = mbox_alloc_msg_cpt_eng_grp_get(dev->mbox);
/* Only the three known engine types are accepted. */
322 case CPT_ENG_TYPE_AE:
323 case CPT_ENG_TYPE_SE:
324 case CPT_ENG_TYPE_IE:
330 req->eng_type = eng_type;
331 ret = mbox_process_msg(dev->mbox, (void *)&rsp);
/* Hardware supports a bounded number of engine groups. */
335 if (rsp->eng_grp_num > 8) {
336 plt_err("Invalid CPT engine group");
/* Cache the group for later eng_grpmsk construction in dev_configure. */
340 roc_cpt->eng_grp[eng_type] = rsp->eng_grp_num;
342 return rsp->eng_grp_num;