/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

/* PCI Extended capability ID */
#define ROC_PCI_EXT_CAP_ID_SRIOV 0x10 /* SRIOV cap */

/* Single Root I/O Virtualization */
#define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
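
/*
 * Layout note (PCIe SR-IOV spec), for the config read in dev_pf_total_vfs()
 * below: TotalVFs is a 16-bit field at byte offset 0x0e within the SR-IOV
 * extended capability, which is why that helper issues a 2-byte read at
 * sriov_pos + ROC_PCI_SRIOV_TOTAL_VF.
 */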
process_msgs(struct dev *dev, struct mbox *mbox)
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));

		/* Add message IDs that are handled here */

			/* Get our identity */
			dev->pf_func = msg->pcifunc;

			plt_err("Message (%s) response has err=%d",
				mbox_id2name(msg->id), msg->rc);

		offset = mbox->rx_start + msg->next_msgoff;

	/* Update acked count if someone is waiting for a message */
	mdev->msgs_acked = msgs_acked;
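
/*
 * Illustrative sketch, not part of the driver: a synchronous sender polls
 * msgs_acked (updated above from IRQ context) until it matches num_msgs,
 * the count of queued requests. The wait helper shown here is hypothetical;
 * the real wait loop lives in the mbox layer.
 */
static inline int
example_mbox_wait_for_rsp(struct mbox *mbox, uint32_t timeout_ms)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	uint32_t ms = 0;

	while (mdev->msgs_acked != mdev->num_msgs) {
		if (++ms > timeout_ms)
			return -EIO; /* AF did not ack in time */
		plt_delay_ms(1);
	}
	return 0;
}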
mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
	/* Check if the request is valid; if not, reply with an invalid msg */
	if (req->sig != MBOX_REQ_SIG)

		reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
process_msgs_up(struct dev *dev, struct mbox *mbox)
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));
		err = mbox_process_msgs_up(dev, msg);
			plt_err("Error %d handling 0x%x (%s)", err, msg->id,
				mbox_id2name(msg->id));
		offset = mbox->rx_start + msg->next_msgoff;

	/* Send mbox responses */
	if (mdev->num_msgs) {
		plt_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
		mbox_msg_send(mbox, 0);
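
/*
 * On the "up" channel the AF is the requester and this PF the responder,
 * mirroring the regular PF->AF direction; hence the replies queued during
 * process_msgs_up() are pushed back to the AF with mbox_msg_send().
 */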
roc_af_pf_mbox_irq(void *param)
	struct dev *dev = param;

	intr = plt_read64(dev->bar2 + RVU_PF_INT);

		plt_base_dbg("Proceeding to check mbox UP messages if any");

	/* Clear the interrupt before scanning the ring so that a message
	 * arriving while we process re-raises the IRQ instead of being lost.
	 */
	plt_write64(intr, dev->bar2 + RVU_PF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 " (pf:%d)", intr, dev->pf);

	/* First process all configuration messages */
	process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	process_msgs_up(dev, &dev->mbox_up);
mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
	struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* Disable the AF<->PF mbox interrupt while hooking the handler */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* MBOX interrupt AF <-> PF */
	rc = dev_irq_register(intr_handle, roc_af_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_AFPF_MBOX);
		plt_err("Failed to register AF<->PF mbox irq");

	/* Ack any stale pending bits, then enable the interrupt */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT);
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
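
/*
 * RVU register idiom used throughout: *_ENA_W1C and *_ENA_W1S are
 * write-1-to-clear / write-1-to-set views of the same interrupt-enable
 * bits, so enable state can be changed without a read-modify-write race.
 */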
mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev)
	return mbox_register_pf_irq(pci_dev, dev);

mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
	struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* Disable the AF<->PF mbox interrupt before unhooking the handler */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* MBOX interrupt AF <-> PF */
	dev_irq_unregister(intr_handle, roc_af_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_AFPF_MBOX);

mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev)
	mbox_unregister_pf_irq(pci_dev, dev);

dev_pf_total_vfs(struct plt_pci_device *pci_dev)
	uint16_t total_vfs = 0;

		plt_pci_find_ext_capability(pci_dev, ROC_PCI_EXT_CAP_ID_SRIOV);
	if (sriov_pos <= 0) {
		plt_warn("Unable to find SRIOV cap, rc=%d", sriov_pos);

	rc = plt_pci_read_config(pci_dev, &total_vfs, 2,
				 sriov_pos + ROC_PCI_SRIOV_TOTAL_VF);
		plt_warn("Unable to read SRIOV cap, rc=%d", rc);
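
/*
 * Illustrative use, not part of the driver: BAR4 hosts one PF<->AF mbox
 * window plus one PF<->VF window per possible VF, so the total mbox area
 * follows directly from the TotalVFs value read above. dev_lmt_setup()
 * below performs this same computation to locate the LMT base.
 */
static inline uint64_t
example_bar4_mbox_area(struct plt_pci_device *pci_dev)
{
	return MBOX_SIZE * (1 + (uint64_t)dev_pf_total_vfs(pci_dev));
}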
dev_setup_shared_lmt_region(struct mbox *mbox)
	struct lmtst_tbl_setup_req *req;

	req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
	req->pcifunc = idev_lmt_pffunc_get();

	return mbox_process(mbox);

dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
	uint64_t bar4_mbox_sz = MBOX_SIZE;
	struct idev_cfg *idev;

	if (roc_model_is_cn9k()) {
		dev->lmt_base = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);

	/* Set common lmt region from second pf_func onwards. */
	if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
	    dev->pf_func != idev_lmt_pffunc_get()) {
		rc = dev_setup_shared_lmt_region(dev->mbox);

			dev->lmt_base = roc_idev_lmt_base_addr_get();

		plt_err("Failed to setup shared lmt region, pf_func %d err %d; "
			"using respective LMT region per pf func",

	/* PF BAR4 should always be sufficient to
	 * hold PF-AF MBOX + PF-VF MBOX + LMT lines.
	 */
	if (pci_dev->mem_resource[4].len <
	    (bar4_mbox_sz + (RVU_LMT_LINE_MAX * RVU_LMT_SZ))) {
		plt_err("Not enough bar4 space for lmt lines and mbox");

	/* LMT base is just after the total VF MBOX area */
	bar4_mbox_sz += (MBOX_SIZE * dev_pf_total_vfs(pci_dev));
	dev->lmt_base = dev->bar4 + bar4_mbox_sz;
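
	/*
	 * Worked example of the layout above, assuming a 64 KB MBOX_SIZE and
	 * 8 total VFs (both values are illustrative only):
	 *   bar4 + 0x00000 .. PF<->AF mbox (64 KB)
	 *   bar4 + 0x10000 .. 8 PF<->VF mboxes (8 * 64 KB)
	 *   bar4 + 0x90000 .. LMT lines (lmt_base points here)
	 */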
	/* The base LMT address should be chosen only from those PCI
	 * functions that participate in shared LMT mode.
	 */
	if (!dev->disable_shared_lmt) {
		idev = idev_get_cfg();
		if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
			idev->lmt_base_addr = dev->lmt_base;
			idev->lmt_pf_func = dev->pf_func;
			idev->num_lmtlines = RVU_LMT_LINE_MAX;
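
	/*
	 * Sketch of the publish side, assuming other PFs read these fields
	 * concurrently: pairing the acquire load above with a release store
	 * of lmt_pf_func would guarantee that lmt_base_addr and num_lmtlines
	 * are visible before lmt_pf_func reads back non-zero, e.g.:
	 *
	 *   __atomic_store_n(&idev->lmt_pf_func, dev->pf_func,
	 *                    __ATOMIC_RELEASE);
	 */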
dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
	int direction, up_direction, rc;
	uintptr_t bar2, bar4, mbox;
	uint64_t intr_offset;

	bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
	bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
	if (bar2 == 0 || bar4 == 0) {
		plt_err("Failed to get PCI bars");

	/* Trigger a fault on the bar2 and bar4 regions
	 * to avoid a BUG_ON in remap_pfn_range().
	 */
	*(volatile uint64_t *)bar2;
	*(volatile uint64_t *)bar4;

	/* Check that the ROC model is supported */
	if (roc_model->flag == 0) {
		rc = UTIL_ERR_INVALID_MODEL;

	direction = MBOX_DIR_PFAF;
	up_direction = MBOX_DIR_PFAF_UP;
	intr_offset = RVU_PF_INT;

	/* Initialize the local mbox */
	rc = mbox_init(&dev->mbox_local, mbox, bar2, direction, 1, intr_offset);

	dev->mbox = &dev->mbox_local;

	rc = mbox_init(&dev->mbox_up, mbox, bar2, up_direction, 1, intr_offset);

	/* Register mbox interrupts */
	rc = mbox_register_irq(pci_dev, dev);

	/* Check the readiness of PF/VF */
	rc = send_ready_msg(dev->mbox, &dev->pf_func);
		goto mbox_unregister;

	dev->pf = dev_get_pf(dev->pf_func);

	dev->mbox_active = 1;

	/* Setup LMT line base */
	rc = dev_lmt_setup(pci_dev, dev);

mbox_unregister:
	mbox_unregister_irq(pci_dev, dev);

	mbox_fini(dev->mbox);
	mbox_fini(&dev->mbox_up);
dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
	struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;

	mbox_unregister_irq(pci_dev, dev);

	/* Release PF - AF mbox */
	mbox = &dev->mbox_up;

	dev->mbox_active = 0;

	/* Disable MSIX vectors */
	dev_irqs_disable(intr_handle);
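
/*
 * Illustrative call sequence, not part of the driver: a probe path embeds
 * struct dev and brackets its lifetime with dev_init()/dev_fini() (both
 * declared in the private dev header). Error handling between the two calls
 * is elided and the name example_probe is hypothetical.
 */
static int
example_probe(struct plt_pci_device *pci_dev, struct dev *dev)
{
	int rc;

	rc = dev_init(dev, pci_dev);
	if (rc)
		return rc;

	/* ... issue mbox requests via dev->mbox and mbox_process() ... */

	return dev_fini(dev, pci_dev);
}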