1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
14 /* PCI Extended capability ID */
15 #define ROC_PCI_EXT_CAP_ID_SRIOV 0x10 /* SRIOV cap */
17 /* Single Root I/O Virtualization */
18 #define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
/* Map a physical mailbox window from /dev/mem into this process.
 * @off:  physical offset of the mbox region (must be non-zero)
 * @size: size of the region in bytes (must be > 0)
 * Returns the mapped VA, or MAP_FAILED on any failure.
 * NOTE(review): intermediate lines are elided in this view; fd close and
 * error paths are presumably handled in the missing code — confirm.
 */
21 mbox_mem_map(off_t off, size_t size)
23 void *va = MAP_FAILED;
/* Validate before touching /dev/mem; size_t "size <= 0" is just size == 0. */
26 if (size <= 0 || !off) {
27 plt_err("Invalid mbox area off 0x%lx size %lu", off, size);
31 mem_fd = open("/dev/mem", O_RDWR);
/* Shared read/write mapping so mailbox writes reach the hardware region. */
35 va = plt_mmap(NULL, size, PLT_PROT_READ | PLT_PROT_WRITE,
36 PLT_MAP_SHARED, mem_fd, off);
40 plt_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd", size, mem_fd,
/* Undo a mbox_mem_map(): unmap @va of @size bytes (body elided in this view). */
47 mbox_mem_unmap(void *va, size_t size)
/* Synchronously send the queued PF->AF mailbox message and busy-poll for the
 * AF response.  Runs in timer-interrupt context, so PF mbox interrupts are
 * masked for the duration and the completion bit is polled instead.
 * @rsp: if non-NULL, receives a pointer to the response header on success
 *       (assumption from the signature — callers elided; confirm).
 */
54 pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
56 uint32_t timeout = 0, sleep = 1;
57 struct mbox *mbox = dev->mbox;
/* Device slot 0 of the PF<->AF mbox is the AF. */
58 struct mbox_dev *mdev = &mbox->dev[0];
/* volatile: re-read from hardware each poll iteration. */
60 volatile uint64_t int_status;
61 struct mbox_msghdr *msghdr;
65 /* We need to disable PF interrupts. We are in timer interrupt */
66 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
/* Ring the doorbell toward the AF. */
69 mbox_msg_send(mbox, 0);
/* Bounded wait: fail once the configured response timeout elapses. */
74 if (timeout >= mbox->rsp_tmo) {
75 plt_err("Message timeout: %dms", mbox->rsp_tmo);
79 int_status = plt_read64(dev->bar2 + RVU_PF_INT);
80 } while ((int_status & 0x1) != 0x1);
/* W1C: acknowledge exactly the interrupt bits we observed. */
83 plt_write64(int_status, dev->bar2 + RVU_PF_INT);
85 /* Enable interrupts */
86 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
/* The response sits right after the (aligned) mbox header in the RX area. */
90 off = mbox->rx_start +
91 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
92 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
/* Forward @num_msg routed messages to the AF, busy-poll for the responses,
 * then copy each AF response into the PF<->VF mailbox of @vf.
 * Returns the number of messages actually received from the AF.
 * Interrupt-context variant like pf_af_sync_msg(): PF interrupts masked,
 * completion bit polled.
 */
102 af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
104 uint32_t timeout = 0, sleep = 1;
105 struct mbox *mbox = dev->mbox;
106 struct mbox_dev *mdev = &mbox->dev[0];
107 volatile uint64_t int_status;
108 struct mbox_hdr *req_hdr;
109 struct mbox_msghdr *msg;
110 struct mbox_msghdr *rsp;
115 /* We need to disable PF interrupts. We are in timer interrupt */
116 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
119 mbox_msg_send(mbox, 0);
124 if (timeout >= mbox->rsp_tmo) {
125 plt_err("Routed messages %d timeout: %dms", num_msg,
129 int_status = plt_read64(dev->bar2 + RVU_PF_INT);
130 } while ((int_status & 0x1) != 0x1);
/* NOTE(review): clears ALL pending INT bits (~0ull), unlike pf_af_sync_msg
 * which acks only the observed bits — confirm the asymmetry is intentional.
 */
133 plt_write64(~0ull, dev->bar2 + RVU_PF_INT);
135 /* Enable interrupts */
136 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
/* Serialize access to the AF mbox RX area while harvesting responses. */
138 plt_spinlock_lock(&mdev->mbox_lock);
140 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
/* Mismatch is logged but not treated as fatal; we still relay what arrived. */
141 if (req_hdr->num_msgs != num_msg)
142 plt_err("Routed messages: %d received: %d", num_msg,
145 /* Get messages from mbox */
146 offset = mbox->rx_start +
147 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
148 for (i = 0; i < req_hdr->num_msgs; i++) {
149 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
/* Payload size = distance from this message to the next one. */
150 size = mbox->rx_start + msg->next_msgoff - offset;
152 /* Reserve PF/VF mbox message */
153 size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
/* NOTE(review): rsp is not NULL-checked before use in the visible lines —
 * confirm mbox_alloc_msg() cannot fail here or that a check is elided.
 */
154 rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
155 mbox_rsp_init(msg->id, rsp);
157 /* Copy message from AF<->PF mbox to PF<->VF mbox */
158 mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
159 (uint8_t *)msg + sizeof(struct mbox_msghdr),
160 size - sizeof(struct mbox_msghdr));
162 /* Set status and sender pf_func data */
164 rsp->pcifunc = msg->pcifunc;
166 offset = mbox->rx_start + msg->next_msgoff;
168 plt_spinlock_unlock(&mdev->mbox_lock);
170 return req_hdr->num_msgs;
/* Drain the VF->PF mailbox of @vf: handle READY locally (mark the VF active
 * and reply), route every other request to the AF, then wait for and relay
 * the AF responses back to the VF.
 */
174 vf_pf_process_msgs(struct dev *dev, uint16_t vf)
176 struct mbox *mbox = &dev->mbox_vfpf;
177 struct mbox_dev *mdev = &mbox->dev[vf];
178 struct mbox_hdr *req_hdr;
179 struct mbox_msghdr *msg;
180 int offset, routed = 0;
184 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
/* Nothing pending from this VF. */
185 if (!req_hdr->num_msgs)
188 offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
190 for (i = 0; i < req_hdr->num_msgs; i++) {
191 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
192 size = mbox->rx_start + msg->next_msgoff - offset;
/* Stamp the true sender (this PF + requesting VF) into the message. */
195 msg->pcifunc = dev_pf_func(dev->pf, vf);
197 if (msg->id == MBOX_MSG_READY) {
198 struct ready_msg_rsp *rsp;
/* Bits per array element of active_vfs (bytes * 8). */
199 uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
201 /* Handle READY message in PF */
202 dev->active_vfs[vf / max_bits] |=
203 BIT_ULL(vf % max_bits);
204 rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
205 mbox, vf, sizeof(*rsp));
206 mbox_rsp_init(msg->id, rsp);
208 /* PF/VF function ID */
209 rsp->hdr.pcifunc = msg->pcifunc;
/* Any other message: forward to the AF on the VF's behalf. */
212 struct mbox_msghdr *af_req;
213 /* Reserve AF/PF mbox message */
214 size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
215 af_req = mbox_alloc_msg(dev->mbox, 0, size);
218 mbox_req_init(msg->id, af_req);
220 /* Copy message from VF<->PF mbox to PF<->AF mbox */
221 mbox_memcpy((uint8_t *)af_req +
222 sizeof(struct mbox_msghdr),
223 (uint8_t *)msg + sizeof(struct mbox_msghdr),
224 size - sizeof(struct mbox_msghdr));
/* Keep the original requester's pcifunc so AF replies to the right VF. */
225 af_req->pcifunc = msg->pcifunc;
228 offset = mbox->rx_start + msg->next_msgoff;
232 plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
233 dev->pf, routed, vf);
/* Block for AF responses and copy them into the VF mailbox. */
234 af_pf_wait_msg(dev, vf, routed);
235 mbox_reset(dev->mbox, 0);
238 /* Send mbox responses to VF */
239 if (mdev->num_msgs) {
240 plt_base_dbg("pf:%d reply %d messages to vf:%d", dev->pf,
242 mbox_msg_send(mbox, vf);
/* Drain UP (notification) messages from VF @vf.  UP messages are informative
 * only here: known IDs are logged, unknown IDs produce an error, then the
 * mailbox is reset and the ack count updated.
 */
249 vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
251 struct mbox *mbox = &dev->mbox_vfpf_up;
252 struct mbox_dev *mdev = &mbox->dev[vf];
253 struct mbox_hdr *req_hdr;
254 struct mbox_msghdr *msg;
259 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
260 if (req_hdr->num_msgs == 0)
263 offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
265 for (i = 0; i < req_hdr->num_msgs; i++) {
266 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
/* Stamp the true sender before logging/dispatch. */
270 msg->pcifunc = dev_pf_func(dev->pf, vf);
273 case MBOX_MSG_CGX_LINK_EVENT:
274 plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
275 msg->id, mbox_id2name(msg->id),
276 msg->pcifunc, dev_get_pf(msg->pcifunc),
277 dev_get_vf(msg->pcifunc));
279 case MBOX_MSG_CGX_PTP_RX_INFO:
280 plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
281 msg->id, mbox_id2name(msg->id),
282 msg->pcifunc, dev_get_pf(msg->pcifunc),
283 dev_get_vf(msg->pcifunc));
/* default: unexpected UP message id. */
286 plt_err("Not handled UP msg 0x%x (%s) func:0x%x",
287 msg->id, mbox_id2name(msg->id), msg->pcifunc);
289 offset = mbox->rx_start + msg->next_msgoff;
291 mbox_reset(mbox, vf);
292 mdev->msgs_acked = msgs_acked;
/* Alarm/timer callback: walk the saved per-VF interrupt bitmap and process
 * pending VF->PF mailbox (and UP) messages, clearing each bit afterwards.
 * @param: struct dev * of the PF.
 */
299 roc_vf_pf_mbox_handle_msg(void *param)
301 uint16_t vf, max_vf, max_bits;
302 struct dev *dev = param;
/* NOTE(review): "* sizeof(uint64_t)" looks like it was meant to be a
 * bits-per-byte factor (* 8, as in vf_pf_process_msgs); it only yields the
 * right value (64) because sizeof(uint64_t) happens to be 8 — confirm.
 */
304 max_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);
305 max_vf = max_bits * MAX_VFPF_DWORD_BITS;
307 for (vf = 0; vf < max_vf; vf++) {
308 if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
309 plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
/* Handle configuration messages first, then UP notifications. */
311 vf_pf_process_msgs(dev, vf);
313 vf_pf_process_up_msgs(dev, vf);
/* Ack: drop this VF's pending bit. */
314 dev->intr.bits[vf / max_bits] &=
315 ~(BIT_ULL(vf % max_bits));
/* VF->PF mailbox IRQ handler: latch pending per-VF interrupt bits into
 * dev->intr, ack them in hardware, and defer actual message processing to a
 * timer (roc_vf_pf_mbox_handle_msg) so the handler stays short.
 * @param: struct dev * of the PF.
 */
322 roc_vf_pf_mbox_irq(void *param)
324 struct dev *dev = param;
325 bool alarm_set = false;
329 for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
330 intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
334 plt_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
335 vfpf, intr, dev->pf, dev->vf);
337 /* Save and clear intr bits */
338 dev->intr.bits[vfpf] |= intr;
/* W1C register: writing the observed bits acknowledges them. */
339 plt_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
/* Only one timer at a time; timer_set presumably flags a pending alarm. */
343 if (!dev->timer_set && alarm_set) {
345 /* Start timer to handle messages */
346 plt_alarm_set(VF_PF_MBOX_TIMER_MS, roc_vf_pf_mbox_handle_msg,
/* Process DOWN-direction responses in @mbox (AF->PF or PF->VF): walk each
 * message, record our identity from READY responses, log errors, and update
 * msgs_acked so synchronous waiters can proceed.
 */
352 process_msgs(struct dev *dev, struct mbox *mbox)
354 struct mbox_dev *mdev = &mbox->dev[0];
355 struct mbox_hdr *req_hdr;
356 struct mbox_msghdr *msg;
361 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
362 if (req_hdr->num_msgs == 0)
365 offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
366 for (i = 0; i < req_hdr->num_msgs; i++) {
367 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
370 plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
371 mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
372 dev_get_vf(msg->pcifunc));
375 /* Add message id's that are handled here */
377 /* Get our identity */
378 dev->pf_func = msg->pcifunc;
/* Non-zero rc in a response is logged but processing continues. */
383 plt_err("Message (%s) response has err=%d",
384 mbox_id2name(msg->id), msg->rc);
387 offset = mbox->rx_start + msg->next_msgoff;
391 /* Update acked if someone is waiting a message */
392 mdev->msgs_acked = msgs_acked;
396 /* Copies the message received from AF and sends it to VF */
/* Broadcast an AF UP message (@rec_msg) to every currently-active VF via
 * the PF->VF UP mailbox.  Inactive VFs are skipped.
 */
398 pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
/* NOTE(review): "* sizeof(uint64_t)" again stands in for a bits-per-byte
 * factor (* 8) — numerically 64 only by coincidence; confirm intent.
 */
400 uint16_t max_bits = sizeof(dev->active_vfs[0]) * sizeof(uint64_t);
401 struct mbox *vf_mbox = &dev->mbox_vfpf_up;
402 struct msg_req *msg = rec_msg;
403 struct mbox_msghdr *vf_msg;
/* Message size is derived from its ID, then aligned for the mailbox. */
407 size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
408 /* Send UP message to all VF's */
409 for (vf = 0; vf < vf_mbox->ndevs; vf++) {
/* NOTE(review): BIT_ULL(vf) lacks "% max_bits" — inconsistent with the
 * "vf / max_bits" word index, and shifting by vf >= 64 is undefined
 * behavior.  Likely should be BIT_ULL(vf % max_bits); confirm upstream.
 */
411 if (!(dev->active_vfs[vf / max_bits] & (BIT_ULL(vf))))
414 plt_base_dbg("(%s) size: %zx to VF: %d",
415 mbox_id2name(msg->hdr.id), size, vf);
417 /* Reserve PF/VF mbox message */
418 vf_msg = mbox_alloc_msg(vf_mbox, vf, size);
420 plt_err("Failed to alloc VF%d UP message", vf);
423 mbox_req_init(msg->hdr.id, vf_msg);
426 * Copy message from AF<->PF UP mbox
429 mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
430 (uint8_t *)msg + sizeof(struct mbox_msghdr),
431 size - sizeof(struct mbox_msghdr));
/* Propagate AF's return code and mark the PF as sender. */
433 vf_msg->rc = msg->hdr.rc;
434 /* Set PF to be a sender */
435 vf_msg->pcifunc = dev->pf_func;
438 mbox_msg_send(vf_mbox, vf);
/* UP-message handler for CGX link events.  On the PF: notify the local
 * link_status_update callback and forward the event to all VFs.  On a VF:
 * just notify the local callback.
 */
443 mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg,
446 struct cgx_link_user_info *linfo = &msg->link_info;
447 void *roc_nix = dev->roc_nix;
449 plt_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
450 dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
451 linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
452 mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
453 dev_get_vf(msg->hdr.pcifunc));
455 /* PF gets link notification from AF */
/* Sender pf == 0 means the message came from the AF, i.e. we are a PF. */
456 if (dev_get_pf(msg->hdr.pcifunc) == 0) {
457 if (dev->ops && dev->ops->link_status_update)
458 dev->ops->link_status_update(roc_nix, linfo);
460 /* Forward the same message as received from AF to VF */
461 pf_vf_mbox_send_up_msg(dev, msg);
463 /* VF gets link up notification */
464 if (dev->ops && dev->ops->link_status_update)
465 dev->ops->link_status_update(roc_nix, linfo);
/* UP-message handler for CGX PTP RX state changes.  Mirrors the link-event
 * handler: PF notifies its ptp_info_update callback and forwards to VFs;
 * a VF only notifies locally.
 */
473 mbox_up_handler_cgx_ptp_rx_info(struct dev *dev,
474 struct cgx_ptp_rx_info_msg *msg,
477 void *roc_nix = dev->roc_nix;
479 plt_base_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
480 dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
481 msg->ptp_en ? "ENABLED" : "DISABLED", msg->hdr.id,
482 mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
483 dev_get_vf(msg->hdr.pcifunc));
485 /* PF gets PTP notification from AF */
486 if (dev_get_pf(msg->hdr.pcifunc) == 0) {
487 if (dev->ops && dev->ops->ptp_info_update)
488 dev->ops->ptp_info_update(roc_nix, msg->ptp_en);
490 /* Forward the same message as received from AF to VF */
491 pf_vf_mbox_send_up_msg(dev, msg);
493 /* VF gets PTP notification */
494 if (dev->ops && dev->ops->ptp_info_update)
495 dev->ops->ptp_info_update(roc_nix, msg->ptp_en);
/* Dispatch one UP request @req: validate its signature, allocate a response
 * in mbox_up, and invoke the per-ID handler via the generated M() cases
 * (the switch itself is elided in this view).
 */
503 mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
505 /* Check if valid, if not reply with a invalid msg */
506 if (req->sig != MBOX_REQ_SIG)
511 reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
/* Expanded once per UP message type: alloc rsp, stamp header, call handler. */
513 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
515 struct _rsp_type *rsp; \
517 rsp = (struct _rsp_type *)mbox_alloc_msg( \
518 &dev->mbox_up, 0, sizeof(struct _rsp_type)); \
522 rsp->hdr.sig = MBOX_RSP_SIG; \
523 rsp->hdr.pcifunc = dev->pf_func; \
525 err = mbox_up_handler_##_fn_name(dev, (struct _req_type *)req, \
/* Walk all pending UP requests in @mbox, dispatch each through
 * mbox_process_msgs_up(), and send the accumulated responses back.
 */
537 process_msgs_up(struct dev *dev, struct mbox *mbox)
539 struct mbox_dev *mdev = &mbox->dev[0];
540 struct mbox_hdr *req_hdr;
541 struct mbox_msghdr *msg;
544 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
545 if (req_hdr->num_msgs == 0)
548 offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
549 for (i = 0; i < req_hdr->num_msgs; i++) {
550 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
552 plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
553 mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
554 dev_get_vf(msg->pcifunc));
/* Handler errors are logged; remaining messages are still processed. */
555 err = mbox_process_msgs_up(dev, msg);
557 plt_err("Error %d handling 0x%x (%s)", err, msg->id,
558 mbox_id2name(msg->id));
559 offset = mbox->rx_start + msg->next_msgoff;
561 /* Send mbox responses */
562 if (mdev->num_msgs) {
563 plt_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
564 mbox_msg_send(mbox, 0);
/* VF-side mailbox IRQ handler: ack the interrupt, then process DOWN
 * responses followed by UP notifications.  @param: struct dev * of the VF.
 */
569 roc_pf_vf_mbox_irq(void *param)
571 struct dev *dev = param;
574 intr = plt_read64(dev->bar2 + RVU_VF_INT);
576 plt_base_dbg("Proceeding to check mbox UP messages if any");
/* W1C ack of the observed bits. */
578 plt_write64(intr, dev->bar2 + RVU_VF_INT);
579 plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
581 /* First process all configuration messages */
582 process_msgs(dev, dev->mbox);
584 /* Process Uplink messages */
585 process_msgs_up(dev, &dev->mbox_up);
/* PF-side AF mailbox IRQ handler: identical flow to roc_pf_vf_mbox_irq but
 * against the PF interrupt register.  @param: struct dev * of the PF.
 */
589 roc_af_pf_mbox_irq(void *param)
591 struct dev *dev = param;
594 intr = plt_read64(dev->bar2 + RVU_PF_INT);
596 plt_base_dbg("Proceeding to check mbox UP messages if any");
/* W1C ack of the observed bits. */
598 plt_write64(intr, dev->bar2 + RVU_PF_INT);
599 plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
601 /* First process all configuration messages */
602 process_msgs(dev, dev->mbox);
604 /* Process Uplink messages */
605 process_msgs_up(dev, &dev->mbox_up);
/* Register all PF mailbox IRQ vectors (two VF<->PF vectors plus the AF<->PF
 * vector), with hardware interrupts disabled during registration and
 * re-enabled (after clearing any stale state) at the end.
 * Returns 0 on success, negative on registration failure.
 */
609 mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
611 struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
/* Mask all VF<->PF and AF<->PF mbox interrupts while wiring handlers. */
615 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
617 dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
619 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
623 /* MBOX interrupt for VF(0...63) <-> PF */
624 rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
625 RVU_PF_INT_VEC_VFPF_MBOX0);
628 plt_err("Fail to register PF(VF0-63) mbox irq");
631 /* MBOX interrupt for VF(64...128) <-> PF */
632 rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
633 RVU_PF_INT_VEC_VFPF_MBOX1);
/* NOTE(review): label says VF64-128; the second 64-bit word actually covers
 * VF64-127 — message text only, confirm before changing.
 */
636 plt_err("Fail to register PF(VF64-128) mbox irq");
639 /* MBOX interrupt AF <-> PF */
640 rc = dev_irq_register(intr_handle, roc_af_pf_mbox_irq, dev,
641 RVU_PF_INT_VEC_AFPF_MBOX);
643 plt_err("Fail to register AF<->PF mbox irq");
/* Re-enable: unmask VF<->PF vectors, clear then unmask AF<->PF. */
648 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
650 dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));
652 plt_write64(~0ull, dev->bar2 + RVU_PF_INT);
653 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
/* Register the single PF<->VF mailbox IRQ vector on a VF device, masking
 * the interrupt during registration and clearing/unmasking afterwards.
 * Returns 0 on success, negative on registration failure.
 */
659 mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
661 struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
/* Mask while wiring the handler. */
665 plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
667 /* MBOX interrupt PF <-> VF */
668 rc = dev_irq_register(intr_handle, roc_pf_vf_mbox_irq, dev,
669 RVU_VF_INT_VEC_MBOX);
671 plt_err("Fail to register PF<->VF mbox irq");
/* Clear any stale pending state, then unmask. */
676 plt_write64(~0ull, dev->bar2 + RVU_VF_INT);
677 plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);
/* Dispatch mailbox IRQ registration to the VF or PF variant (the VF/PF
 * test itself is elided in this view).
 */
683 mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev)
686 return mbox_register_vf_irq(pci_dev, dev);
688 return mbox_register_pf_irq(pci_dev, dev);
/* Tear down PF mailbox IRQs: mask all vectors, cancel the deferred-message
 * alarm, and unregister each handler registered by mbox_register_pf_irq().
 */
692 mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
694 struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
/* Mask everything first so no handler fires mid-teardown. */
698 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
700 dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
702 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
/* Cancel any pending roc_vf_pf_mbox_handle_msg alarm. */
706 plt_alarm_cancel(roc_vf_pf_mbox_handle_msg, dev);
708 /* Unregister the interrupt handler for each vectors */
709 /* MBOX interrupt for VF(0...63) <-> PF */
710 dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
711 RVU_PF_INT_VEC_VFPF_MBOX0);
713 /* MBOX interrupt for VF(64...128) <-> PF */
714 dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
715 RVU_PF_INT_VEC_VFPF_MBOX1);
717 /* MBOX interrupt AF <-> PF */
718 dev_irq_unregister(intr_handle, roc_af_pf_mbox_irq, dev,
719 RVU_PF_INT_VEC_AFPF_MBOX);
/* Tear down the VF mailbox IRQ: mask the interrupt and unregister the
 * handler registered by mbox_register_vf_irq().
 */
723 mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
725 struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
728 plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
730 /* Unregister the interrupt handler */
731 dev_irq_unregister(intr_handle, roc_pf_vf_mbox_irq, dev,
732 RVU_VF_INT_VEC_MBOX);
/* Dispatch mailbox IRQ teardown to the VF or PF variant (the VF/PF test
 * itself is elided in this view).
 */
736 mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev)
739 mbox_unregister_vf_irq(pci_dev, dev);
741 mbox_unregister_pf_irq(pci_dev, dev);
/* Inform the AF that VF @vf has undergone FLR by sending a VF_FLR message
 * with the pcifunc rewritten to identify the VF.  Sent synchronously because
 * this is called from interrupt context.
 */
745 vf_flr_send_msg(struct dev *dev, uint16_t vf)
747 struct mbox *mbox = dev->mbox;
751 req = mbox_alloc_msg_vf_flr(mbox);
754 /* Overwrite pcifunc to indicate VF */
755 req->hdr.pcifunc = dev_pf_func(dev->pf, vf);
757 /* Sync message in interrupt context */
/* No response payload needed, hence rsp == NULL. */
758 rc = pf_af_sync_msg(dev, NULL);
760 plt_err("Failed to send VF FLR mbox msg, rc=%d", rc);
/* VF FLR IRQ handler on the PF: for each VF with a pending FLR bit, ack and
 * mask the interrupt, notify the AF, signal FLR completion via the trpend
 * register, then re-enable the interrupt.  @param: struct dev * of the PF.
 */
766 roc_pf_vf_flr_irq(void *param)
768 struct dev *dev = (struct dev *)param;
769 uint16_t max_vf = 64, vf;
/* Fall back to 64 VFs if maxvf was never discovered. */
774 max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
777 plt_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);
779 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
780 intr = plt_read64(bar2 + RVU_PF_VFFLR_INTX(i));
/* NOTE(review): inner loop uses vf < max_vf per 64-bit word; for i > 0 the
 * absolute VF is 64*i + vf (as the debug log shows) — confirm the bound is
 * intended per-word rather than global.
 */
784 for (vf = 0; vf < max_vf; vf++) {
785 if (!(intr & (1ULL << vf)))
788 plt_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d", i,
789 intr, (64 * i + vf));
790 /* Clear interrupt */
791 plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
792 /* Disable the interrupt */
793 plt_write64(BIT_ULL(vf),
794 bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
795 /* Inform AF about VF reset */
796 vf_flr_send_msg(dev, vf);
798 /* Signal FLR finish */
799 plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
800 /* Enable interrupt */
801 plt_write64(~0ull, bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
/* Tear down VF FLR IRQs: mask both FLR interrupt words and unregister the
 * two FLR vectors registered by vf_flr_register_irqs().
 */
807 vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
809 struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
812 plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);
/* HW mask first */
815 for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
816 plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
818 dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
819 RVU_PF_INT_VEC_VFFLR0);
821 dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
822 RVU_PF_INT_VEC_VFFLR1);
/* Register the two VF FLR IRQ vectors and enable the hardware interrupts
 * (after clearing stale pending/trpend state).
 * Returns 0 on success, negative on registration failure.
 */
828 vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
830 struct plt_intr_handle *handle = &pci_dev->intr_handle;
833 plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);
835 rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
836 RVU_PF_INT_VEC_VFFLR0);
838 plt_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);
840 rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
841 RVU_PF_INT_VEC_VFFLR1);
843 plt_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);
845 /* Enable HW interrupt */
/* Clear stale pending bits and trpend state before unmasking. */
846 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
847 plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
848 plt_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
849 plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
/* Count the number of VFs currently marked active in dev->active_vfs.
 * NOTE(review): __builtin_popcount() operates on unsigned int (32 bits);
 * if active_vfs[] elements are 64-bit this undercounts — should likely be
 * __builtin_popcountll().  Confirm the element type.
 */
855 dev_active_vfs(struct dev *dev)
859 for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
860 count += __builtin_popcount(dev->active_vfs[i]);
/* Set DEV_HWCAP_F_VF in dev->hwcap when the PCI device ID identifies any of
 * the known RVU VF variants; the PF ID leaves the flag clear.
 */
866 dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
868 switch (pci_dev->id.device_id) {
869 case PCI_DEVID_CNXK_RVU_PF:
/* All VF device IDs fall through to the same flag set. */
871 case PCI_DEVID_CNXK_RVU_SSO_TIM_VF:
872 case PCI_DEVID_CNXK_RVU_NPA_VF:
873 case PCI_DEVID_CNXK_RVU_AF_VF:
874 case PCI_DEVID_CNXK_RVU_VF:
875 case PCI_DEVID_CNXK_RVU_SDP_VF:
876 dev->hwcap |= DEV_HWCAP_F_VF;
/* Return the base VA of the per-VF mailbox region.  On CN10K+ it lives in
 * BAR4 directly after the PF mbox; on CN9K it must be mmap'd from the
 * physical address programmed in RVU_PF_VF_BAR4_ADDR.
 */
882 dev_vf_mbase_get(struct plt_pci_device *pci_dev, struct dev *dev)
884 void *vf_mbase = NULL;
890 /* For CN10K onwards, it is just after PF MBOX */
891 if (!roc_model_is_cn9k())
892 return dev->bar4 + MBOX_SIZE;
/* CN9K: read the VF mbox physical base from hardware and map it. */
894 pa = plt_read64(dev->bar2 + RVU_PF_VF_BAR4_ADDR);
896 plt_err("Invalid VF mbox base pa");
/* One MBOX_SIZE window per VF. */
900 vf_mbase = mbox_mem_map(pa, MBOX_SIZE * pci_dev->max_vfs);
901 if (vf_mbase == MAP_FAILED) {
902 plt_err("Failed to mmap vf mbase at pa 0x%lx, rc=%d", pa,
906 return (uintptr_t)vf_mbase;
/* Release the VF mbox base obtained from dev_vf_mbase_get().  Only CN9K
 * actually mapped memory, so only CN9K needs the unmap.
 */
910 dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase)
912 if (!vf_mbase || !pci_dev->max_vfs || !roc_model_is_cn9k())
915 mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
/* Read TotalVFs from the PCI SR-IOV extended capability of @pci_dev.
 * Returns 0 (with a warning) when the capability is absent or unreadable.
 */
919 dev_pf_total_vfs(struct plt_pci_device *pci_dev)
921 uint16_t total_vfs = 0;
925 plt_pci_find_ext_capability(pci_dev, ROC_PCI_EXT_CAP_ID_SRIOV);
926 if (sriov_pos <= 0) {
927 plt_warn("Unable to find SRIOV cap, rc=%d", sriov_pos);
/* TotalVF is a 16-bit field at offset 0x0e within the SR-IOV capability. */
931 rc = plt_pci_read_config(pci_dev, &total_vfs, 2,
932 sriov_pos + ROC_PCI_SRIOV_TOTAL_VF);
934 plt_warn("Unable to read SRIOV cap, rc=%d", rc);
/* Ask the AF (via mbox) to point this pf_func's LMT table at the shared
 * LMT owner returned by idev_lmt_pffunc_get().  Returns mbox_process() rc.
 * NOTE(review): req is used without a NULL check in the visible lines —
 * confirm mbox_alloc_msg_lmtst_tbl_setup() cannot fail or a check is elided.
 */
942 dev_setup_shared_lmt_region(struct mbox *mbox)
944 struct lmtst_tbl_setup_req *req;
946 req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
947 req->pcifunc = idev_lmt_pffunc_get();
949 return mbox_process(mbox);
/* Establish this device's LMT (Large-atomic-store) line base address.
 * CN9K: fixed BAR2 window.  CN10K+: shared region via the AF when another
 * pf_func already owns it, otherwise a per-function region carved out of
 * BAR4 (after the mbox area on a PF).  The first participating pf_func
 * publishes its base in the idev config for others to share.
 */
953 dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
955 struct idev_cfg *idev;
/* CN9K: LMT lines are a fixed BAR2 block. */
958 if (roc_model_is_cn9k()) {
959 dev->lmt_base = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
965 /* Set common lmt region from second pf_func onwards. */
966 if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
967 dev->pf_func != idev_lmt_pffunc_get()) {
968 rc = dev_setup_shared_lmt_region(dev->mbox);
970 dev->lmt_base = roc_idev_lmt_base_addr_get();
/* Shared setup failure falls back to a private LMT region below. */
973 plt_err("Failed to setup shared lmt region, pf_func %d err %d "
974 "Using respective LMT region per pf func",
978 if (dev_is_vf(dev)) {
979 /* VF BAR4 should always be sufficient enough to
982 if (pci_dev->mem_resource[4].len <
983 (RVU_LMT_LINE_MAX * RVU_LMT_SZ)) {
984 plt_err("Not enough bar4 space for lmt lines");
988 dev->lmt_base = dev->bar4;
/* PF: mbox areas precede the LMT lines in BAR4. */
990 uint64_t bar4_mbox_sz = MBOX_SIZE;
992 /* PF BAR4 should always be sufficient enough to
993 * hold PF-AF MBOX + PF-VF MBOX + LMT lines.
995 if (pci_dev->mem_resource[4].len <
996 (bar4_mbox_sz + (RVU_LMT_LINE_MAX * RVU_LMT_SZ))) {
997 plt_err("Not enough bar4 space for lmt lines and mbox");
1001 /* LMT base is just after total VF MBOX area */
1002 bar4_mbox_sz += (MBOX_SIZE * dev_pf_total_vfs(pci_dev));
1003 dev->lmt_base = dev->bar4 + bar4_mbox_sz;
1006 /* Base LMT address should be chosen from only those pci funcs which
1007 * participate in LMT shared mode.
1009 if (!dev->disable_shared_lmt) {
1010 idev = idev_get_cfg();
/* First participant publishes itself as the shared-LMT owner. */
1011 if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
1012 idev->lmt_base_addr = dev->lmt_base;
1013 idev->lmt_pf_func = dev->pf_func;
1014 idev->num_lmtlines = RVU_LMT_LINE_MAX;
/* Bring up a ROC base device: map/validate BARs, initialize the local and
 * UP mailboxes toward the AF (PF) or PF (VF), register mbox IRQs, perform
 * the READY handshake to learn pf_func, set up per-VF mailboxes and VF-FLR
 * IRQs on a PF, init the NPA LF, and configure the LMT base.
 * Returns 0 on success; error paths unwind via the labels at the end.
 */
1022 dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
1024 int direction, up_direction, rc;
1025 uintptr_t bar2, bar4, mbox;
1026 uintptr_t vf_mbase = 0;
1027 uint64_t intr_offset;
1029 bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
1030 bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
1031 if (bar2 == 0 || bar4 == 0) {
1032 plt_err("Failed to get PCI bars");
1037 /* Trigger fault on bar2 and bar4 regions
1038 * to avoid BUG_ON in remap_pfn_range()
/* Dummy reads force the kernel to fault the pages in now. */
1041 *(volatile uint64_t *)bar2;
1042 *(volatile uint64_t *)bar4;
1044 /* Check ROC model supported */
1045 if (roc_model->flag == 0) {
1046 rc = UTIL_ERR_INVALID_MODEL;
1050 dev->maxvf = pci_dev->max_vfs;
1053 dev_vf_hwcap_update(pci_dev, dev);
/* Pick mbox window and directions depending on PF vs VF role. */
1055 if (dev_is_vf(dev)) {
1056 mbox = (roc_model_is_cn9k() ?
1057 bar4 : (bar2 + RVU_VF_MBOX_REGION));
1058 direction = MBOX_DIR_VFPF;
1059 up_direction = MBOX_DIR_VFPF_UP;
1060 intr_offset = RVU_VF_INT;
1063 direction = MBOX_DIR_PFAF;
1064 up_direction = MBOX_DIR_PFAF_UP;
1065 intr_offset = RVU_PF_INT;
1068 /* Initialize the local mbox */
1069 rc = mbox_init(&dev->mbox_local, mbox, bar2, direction, 1, intr_offset);
1072 dev->mbox = &dev->mbox_local;
1074 rc = mbox_init(&dev->mbox_up, mbox, bar2, up_direction, 1, intr_offset);
1078 /* Register mbox interrupts */
1079 rc = mbox_register_irq(pci_dev, dev);
1083 /* Check the readiness of PF/VF */
/* READY handshake also returns our pcifunc identity. */
1084 rc = send_ready_msg(dev->mbox, &dev->pf_func);
1086 goto mbox_unregister;
1088 dev->pf = dev_get_pf(dev->pf_func);
1089 dev->vf = dev_get_vf(dev->pf_func);
1090 memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));
1092 /* Allocate memory for device ops */
1093 dev->ops = plt_zmalloc(sizeof(struct dev_ops), 0);
1094 if (dev->ops == NULL) {
1096 goto mbox_unregister;
1099 /* Found VF devices in a PF device */
1100 if (pci_dev->max_vfs > 0) {
1101 /* Remap mbox area for all vf's */
1102 vf_mbase = dev_vf_mbase_get(pci_dev, dev);
1105 goto mbox_unregister;
1107 /* Init mbox object */
1108 rc = mbox_init(&dev->mbox_vfpf, vf_mbase, bar2, MBOX_DIR_PFVF,
1109 pci_dev->max_vfs, intr_offset);
1113 /* PF -> VF UP messages */
1114 rc = mbox_init(&dev->mbox_vfpf_up, vf_mbase, bar2,
1115 MBOX_DIR_PFVF_UP, pci_dev->max_vfs, intr_offset);
1120 /* Register VF-FLR irq handlers */
1121 if (!dev_is_vf(dev)) {
1122 rc = vf_flr_register_irqs(pci_dev, dev);
1126 dev->mbox_active = 1;
1128 rc = npa_lf_init(dev, pci_dev);
1132 /* Setup LMT line base */
1133 rc = dev_lmt_setup(pci_dev, dev);
/* Error unwind labels below: release in reverse order of acquisition. */
1139 dev_vf_mbase_put(pci_dev, vf_mbase);
1141 mbox_unregister_irq(pci_dev, dev);
1145 mbox_fini(dev->mbox);
1146 mbox_fini(&dev->mbox_up);
/* Tear down a ROC base device: drop NPA LF references, unregister mbox and
 * VF-FLR IRQs, release the PF<->VF, UP and AF mailboxes, and disable MSI-X.
 * NOTE(review): the definition continues past the visible end of this view.
 */
1152 dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
1154 struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
1157 /* Check if this dev hosts npalf and has 1+ refs */
/* Other users still hold the NPA LF; skip full teardown (early-out elided). */
1158 if (idev_npa_lf_active(dev) > 1)
1161 /* Clear references to this pci dev */
1164 mbox_unregister_irq(pci_dev, dev);
/* FLR IRQs exist only on PFs. */
1166 if (!dev_is_vf(dev))
1167 vf_flr_unregister_irqs(pci_dev, dev);
1168 /* Release PF - VF */
1169 mbox = &dev->mbox_vfpf;
1170 if (mbox->hwbase && mbox->dev)
1171 dev_vf_mbase_put(pci_dev, mbox->hwbase)
1177 mbox = &dev->mbox_vfpf_up;
1180 /* Release PF - AF */
1183 mbox = &dev->mbox_up;
1185 dev->mbox_active = 0;
1187 /* Disable MSIX vectors */
1188 dev_irqs_disable(intr_handle);