1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* PCIe extended-capability constants (per the PCI Express Base spec):
 * 0x10 is the SR-IOV extended capability ID; 0x0e is the byte offset
 * of the TotalVFs field within that capability structure.
 */
14 /* PCI Extended capability ID */
15 #define ROC_PCI_EXT_CAP_ID_SRIOV 0x10 /* SRIOV cap */
17 /* Single Root I/O Virtualization */
18 #define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
/* Map the mbox region at physical offset @off (size @size) into this
 * process via /dev/mem; returns the mapped VA (error/cleanup lines are
 * elided from this chunk).
 * NOTE(review): size_t is unsigned, so "size <= 0" is just "size == 0".
 * NOTE(review): "%lx"/"%lu" for off_t/size_t is not portable; the later
 * plt_err correctly uses %jd/%zx — make these match (cast off to
 * intmax_t for %jd).
 */
21 mbox_mem_map(off_t off, size_t size)
23 void *va = MAP_FAILED;
26 if (size <= 0 || !off) {
27 plt_err("Invalid mbox area off 0x%lx size %lu", off, size);
/* Direct physical mapping through /dev/mem — presumably requires root
 * and a kernel without STRICT_DEVMEM lockdown; TODO confirm.
 */
31 mem_fd = open("/dev/mem", O_RDWR);
35 va = plt_mmap(NULL, size, PLT_PROT_READ | PLT_PROT_WRITE,
36 PLT_MAP_SHARED, mem_fd, off);
40 plt_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd", size, mem_fd,
/* Unmap a region previously mapped by mbox_mem_map() (body elided). */
47 mbox_mem_unmap(void *va, size_t size)
/* Synchronously send the pending PF->AF mbox message and busy-poll for
 * the AF completion bit in RVU_PF_INT. Runs in (timer) interrupt
 * context, so the PF mbox interrupt is masked for the duration and
 * re-enabled afterwards. On success the first response header is
 * returned via *rsp (return path elided from this chunk).
 */
54 pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
56 uint32_t timeout = 0, sleep = 1;
57 struct mbox *mbox = dev->mbox;
58 struct mbox_dev *mdev = &mbox->dev[0];
60 volatile uint64_t int_status = 0;
61 struct mbox_msghdr *msghdr;
65 /* We need to disable PF interrupts. We are in timer interrupt */
66 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
69 mbox_msg_send(mbox, 0);
74 if (timeout >= mbox->rsp_tmo) {
75 plt_err("Message timeout: %dms", mbox->rsp_tmo);
/* Poll bit 0 of RVU_PF_INT until AF signals mbox completion. */
79 int_status = plt_read64(dev->bar2 + RVU_PF_INT);
80 } while ((int_status & 0x1) != 0x1);
/* W1C: acknowledge exactly the status bits observed. */
83 plt_write64(int_status, dev->bar2 + RVU_PF_INT);
85 /* Enable interrupts */
86 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
/* First message starts right after the aligned mbox header. */
90 off = mbox->rx_start +
91 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
92 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
/* Send the routed messages to AF, busy-wait for the AF completion
 * interrupt bit (PF mbox interrupt masked; we are in interrupt
 * context), then copy each AF response from the AF<->PF mbox into the
 * PF<->VF mbox for @vf. Returns the number of messages found in the
 * AF response region.
 */
102 af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
104 uint32_t timeout = 0, sleep = 1;
105 struct mbox *mbox = dev->mbox;
106 struct mbox_dev *mdev = &mbox->dev[0];
107 volatile uint64_t int_status;
108 struct mbox_hdr *req_hdr;
109 struct mbox_msghdr *msg;
110 struct mbox_msghdr *rsp;
115 /* We need to disable PF interrupts. We are in timer interrupt */
116 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
119 mbox_msg_send(mbox, 0);
124 if (timeout >= mbox->rsp_tmo) {
125 plt_err("Routed messages %d timeout: %dms", num_msg,
/* Poll bit 0 of RVU_PF_INT for AF mbox completion. */
129 int_status = plt_read64(dev->bar2 + RVU_PF_INT);
130 } while ((int_status & 0x1) != 0x1);
/* W1C clear of the interrupt status. */
133 plt_write64(~0ull, dev->bar2 + RVU_PF_INT);
135 /* Enable interrupts */
136 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
138 plt_spinlock_lock(&mdev->mbox_lock);
140 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
/* AF should answer exactly the number of messages we routed. */
141 if (req_hdr->num_msgs != num_msg)
142 plt_err("Routed messages: %d received: %d", num_msg,
145 /* Get messages from mbox */
146 offset = mbox->rx_start +
147 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
148 for (i = 0; i < req_hdr->num_msgs; i++) {
149 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
150 size = mbox->rx_start + msg->next_msgoff - offset;
152 /* Reserve PF/VF mbox message */
153 size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
154 rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
156 plt_err("Failed to reserve VF%d message", vf);
160 mbox_rsp_init(msg->id, rsp);
162 /* Copy message from AF<->PF mbox to PF<->VF mbox */
163 mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
164 (uint8_t *)msg + sizeof(struct mbox_msghdr),
165 size - sizeof(struct mbox_msghdr));
167 /* Set status and sender pf_func data */
169 rsp->pcifunc = msg->pcifunc;
171 /* Whenever a PF comes up, AF sends the link status to it but
172 * when VF comes up no such event is sent to respective VF.
173 * Using MBOX_MSG_NIX_LF_START_RX response from AF for the
174 * purpose and send the link status of PF to VF.
176 if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
177 /* Send link status to VF */
178 struct cgx_link_user_info linfo;
179 struct mbox_msghdr *vf_msg;
182 /* Get the link status */
183 memset(&linfo, 0, sizeof(struct cgx_link_user_info));
184 if (dev->ops && dev->ops->link_status_get)
185 dev->ops->link_status_get(dev->roc_nix, &linfo);
187 sz = PLT_ALIGN(mbox_id2size(MBOX_MSG_CGX_LINK_EVENT),
189 /* Prepare the message to be sent */
190 vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
192 mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
193 memcpy((uint8_t *)vf_msg +
194 sizeof(struct mbox_msghdr), &linfo,
195 sizeof(struct cgx_link_user_info));
/* Forward AF's status/pf_func so the VF sees the original sender. */
197 vf_msg->rc = msg->rc;
198 vf_msg->pcifunc = msg->pcifunc;
200 mbox_msg_send(&dev->mbox_vfpf_up, vf);
/* Advance to the next message in the AF rx region. */
204 offset = mbox->rx_start + msg->next_msgoff;
206 plt_spinlock_unlock(&mdev->mbox_lock);
208 return req_hdr->num_msgs;
/* Process all pending VF->PF mbox messages for @vf: READY is handled
 * locally in the PF (marks the VF active and replies directly); every
 * other message is copied into the PF->AF mbox and routed to AF, after
 * which af_pf_wait_msg() collects the responses and they are sent back
 * to the VF.
 */
212 vf_pf_process_msgs(struct dev *dev, uint16_t vf)
214 struct mbox *mbox = &dev->mbox_vfpf;
215 struct mbox_dev *mdev = &mbox->dev[vf];
216 struct mbox_hdr *req_hdr;
217 struct mbox_msghdr *msg;
218 int offset, routed = 0;
222 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
223 if (!req_hdr->num_msgs)
226 offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
228 for (i = 0; i < req_hdr->num_msgs; i++) {
229 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
230 size = mbox->rx_start + msg->next_msgoff - offset;
/* Stamp the true sender (pf,vf) before forwarding anywhere. */
233 msg->pcifunc = dev_pf_func(dev->pf, vf);
235 if (msg->id == MBOX_MSG_READY) {
236 struct ready_msg_rsp *rsp;
237 uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
239 /* Handle READY message in PF */
240 dev->active_vfs[vf / max_bits] |=
241 BIT_ULL(vf % max_bits);
242 rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
243 mbox, vf, sizeof(*rsp));
245 plt_err("Failed to alloc VF%d READY message",
250 mbox_rsp_init(msg->id, rsp);
252 /* PF/VF function ID */
253 rsp->hdr.pcifunc = msg->pcifunc;
/* Non-READY: route the request through the PF->AF mbox. */
256 struct mbox_msghdr *af_req;
257 /* Reserve AF/PF mbox message */
258 size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
259 af_req = mbox_alloc_msg(dev->mbox, 0, size);
262 mbox_req_init(msg->id, af_req);
264 /* Copy message from VF<->PF mbox to PF<->AF mbox */
265 mbox_memcpy((uint8_t *)af_req +
266 sizeof(struct mbox_msghdr),
267 (uint8_t *)msg + sizeof(struct mbox_msghdr),
268 size - sizeof(struct mbox_msghdr));
269 af_req->pcifunc = msg->pcifunc;
272 offset = mbox->rx_start + msg->next_msgoff;
276 plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
277 dev->pf, routed, vf);
/* Wait for AF responses, then reset the AF mbox for reuse. */
278 af_pf_wait_msg(dev, vf, routed);
279 mbox_reset(dev->mbox, 0);
282 /* Send mbox responses to VF */
283 if (mdev->num_msgs) {
284 plt_base_dbg("pf:%d reply %d messages to vf:%d", dev->pf,
286 mbox_msg_send(mbox, vf);
/* Drain VF->PF UP (notification) mbox messages for @vf. Known link/PTP
 * events are only logged here (presumably acks of PF-originated UP
 * messages); unknown ids are reported as errors. Finally the mbox is
 * reset and msgs_acked updated so any waiter can proceed.
 */
293 vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
295 struct mbox *mbox = &dev->mbox_vfpf_up;
296 struct mbox_dev *mdev = &mbox->dev[vf];
297 struct mbox_hdr *req_hdr;
298 struct mbox_msghdr *msg;
303 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
304 if (req_hdr->num_msgs == 0)
307 offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
309 for (i = 0; i < req_hdr->num_msgs; i++) {
310 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
/* Stamp the real sender before logging/handling. */
314 msg->pcifunc = dev_pf_func(dev->pf, vf);
317 case MBOX_MSG_CGX_LINK_EVENT:
318 plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
319 msg->id, mbox_id2name(msg->id),
320 msg->pcifunc, dev_get_pf(msg->pcifunc),
321 dev_get_vf(msg->pcifunc));
323 case MBOX_MSG_CGX_PTP_RX_INFO:
324 plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
325 msg->id, mbox_id2name(msg->id),
326 msg->pcifunc, dev_get_pf(msg->pcifunc),
327 dev_get_vf(msg->pcifunc));
/* default: unexpected UP message id. */
330 plt_err("Not handled UP msg 0x%x (%s) func:0x%x",
331 msg->id, mbox_id2name(msg->id), msg->pcifunc);
333 offset = mbox->rx_start + msg->next_msgoff;
335 mbox_reset(mbox, vf);
336 mdev->msgs_acked = msgs_acked;
/* Alarm/timer callback: walk the saved per-VF interrupt bitmap and
 * process both request and UP mboxes for each VF whose bit is set,
 * clearing the bit once handled.
 * NOTE(review): max_bits multiplies by sizeof(uint64_t) (== 8) where
 * bits-per-byte is meant; numerically identical here, but "* 8" (or
 * CHAR_BIT) would state the intent — same pattern appears in
 * pf_vf_mbox_send_up_msg(). Confirm intr.bits element type.
 */
343 roc_vf_pf_mbox_handle_msg(void *param)
345 uint16_t vf, max_vf, max_bits;
346 struct dev *dev = param;
348 max_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);
349 max_vf = max_bits * MAX_VFPF_DWORD_BITS;
351 for (vf = 0; vf < max_vf; vf++) {
352 if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
353 plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
355 vf_pf_process_msgs(dev, vf);
/* Also drain any UP notifications from this VF. */
357 vf_pf_process_up_msgs(dev, vf);
358 dev->intr.bits[vf / max_bits] &=
359 ~(BIT_ULL(vf % max_bits));
/* VF->PF mbox IRQ handler: latch each VFPF interrupt word into
 * dev->intr.bits, acknowledge the hardware (W1C), and arm a one-shot
 * alarm so the actual message processing happens outside the IRQ.
 */
366 roc_vf_pf_mbox_irq(void *param)
368 struct dev *dev = param;
369 bool alarm_set = false;
373 for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
374 intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
378 plt_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
379 vfpf, intr, dev->pf, dev->vf);
381 /* Save and clear intr bits */
382 dev->intr.bits[vfpf] |= intr;
383 plt_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
/* Defer processing to roc_vf_pf_mbox_handle_msg via alarm. */
387 if (!dev->timer_set && alarm_set) {
389 /* Start timer to handle messages */
390 plt_alarm_set(VF_PF_MBOX_TIMER_MS, roc_vf_pf_mbox_handle_msg,
/* Process AF->PF (or PF->VF) mbox responses on @mbox: for each message
 * record errors, capture our pf_func from the READY response, and
 * special-case a benign PFC permission error. Updates msgs_acked so a
 * synchronous waiter (mbox_process) can complete.
 */
396 process_msgs(struct dev *dev, struct mbox *mbox)
398 struct mbox_dev *mdev = &mbox->dev[0];
399 struct mbox_hdr *req_hdr;
400 struct mbox_msghdr *msg;
405 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
406 if (req_hdr->num_msgs == 0)
409 offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
410 for (i = 0; i < req_hdr->num_msgs; i++) {
411 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
414 plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
415 mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
416 dev_get_vf(msg->pcifunc));
419 /* Add message id's that are handled here */
421 /* Get our identity */
422 dev->pf_func = msg->pcifunc;
424 case MBOX_MSG_CGX_PRIO_FLOW_CTRL_CFG:
425 /* Handling the case where one VF tries to disable PFC
426 * while PFC already configured on other VFs. This is
427 * not an error but a warning which can be ignored.
429 #define LMAC_AF_ERR_PERM_DENIED -1103
431 if (msg->rc == LMAC_AF_ERR_PERM_DENIED) {
433 "Receive Flow control disable not permitted "
434 "as its used by other PFVFs");
437 plt_err("Message (%s) response has err=%d",
438 mbox_id2name(msg->id), msg->rc);
/* default: any non-zero rc is a real error. */
445 plt_err("Message (%s) response has err=%d",
446 mbox_id2name(msg->id), msg->rc);
449 offset = mbox->rx_start + msg->next_msgoff;
453 /* Update acked if someone is waiting a message */
454 mdev->msgs_acked = msgs_acked;
458 /* Copies the message received from AF and sends it to VF */
/* Broadcast an AF UP message (@rec_msg) to every active VF via the
 * PF->VF UP mbox, with the PF set as sender.
 * NOTE(review): max_bits uses sizeof(uint64_t) where bits-per-byte (8)
 * is meant — numerically the same, but intent is obscured.
 * NOTE(review): the active test uses BIT_ULL(vf) without "% max_bits",
 * unlike vf_pf_process_msgs which sets BIT_ULL(vf % max_bits); for
 * vf >= 64 this looks wrong — confirm against upstream.
 */
460 pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
462 uint16_t max_bits = sizeof(dev->active_vfs[0]) * sizeof(uint64_t);
463 struct mbox *vf_mbox = &dev->mbox_vfpf_up;
464 struct msg_req *msg = rec_msg;
465 struct mbox_msghdr *vf_msg;
469 size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
470 /* Send UP message to all VF's */
471 for (vf = 0; vf < vf_mbox->ndevs; vf++) {
473 if (!(dev->active_vfs[vf / max_bits] & (BIT_ULL(vf))))
476 plt_base_dbg("(%s) size: %zx to VF: %d",
477 mbox_id2name(msg->hdr.id), size, vf);
479 /* Reserve PF/VF mbox message */
480 vf_msg = mbox_alloc_msg(vf_mbox, vf, size);
482 plt_err("Failed to alloc VF%d UP message", vf);
485 mbox_req_init(msg->hdr.id, vf_msg);
488 * Copy message from AF<->PF UP mbox
491 mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
492 (uint8_t *)msg + sizeof(struct mbox_msghdr),
493 size - sizeof(struct mbox_msghdr));
495 vf_msg->rc = msg->hdr.rc;
496 /* Set PF to be a sender */
497 vf_msg->pcifunc = dev->pf_func;
500 mbox_msg_send(vf_mbox, vf);
/* UP handler for MBOX_MSG_CGX_LINK_EVENT: notify the local user via
 * ops->link_status_update; when running as PF (sender pf 0 == AF),
 * also forward the event unchanged to all active VFs.
 */
505 mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg,
508 struct cgx_link_user_info *linfo = &msg->link_info;
509 void *roc_nix = dev->roc_nix;
511 plt_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
512 dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
513 linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
514 mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
515 dev_get_vf(msg->hdr.pcifunc));
517 /* PF gets link notification from AF */
518 if (dev_get_pf(msg->hdr.pcifunc) == 0) {
519 if (dev->ops && dev->ops->link_status_update)
520 dev->ops->link_status_update(roc_nix, linfo);
522 /* Forward the same message as received from AF to VF */
523 pf_vf_mbox_send_up_msg(dev, msg);
525 /* VF gets link up notification */
526 if (dev->ops && dev->ops->link_status_update)
527 dev->ops->link_status_update(roc_nix, linfo);
/* UP handler for MBOX_MSG_CGX_PTP_RX_INFO: mirror of the link-event
 * handler — update local PTP state via ops->ptp_info_update and, when
 * the sender is the AF (pf 0), forward the message to active VFs.
 */
535 mbox_up_handler_cgx_ptp_rx_info(struct dev *dev,
536 struct cgx_ptp_rx_info_msg *msg,
539 void *roc_nix = dev->roc_nix;
541 plt_base_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
542 dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
543 msg->ptp_en ? "ENABLED" : "DISABLED", msg->hdr.id,
544 mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
545 dev_get_vf(msg->hdr.pcifunc));
547 /* PF gets PTP notification from AF */
548 if (dev_get_pf(msg->hdr.pcifunc) == 0) {
549 if (dev->ops && dev->ops->ptp_info_update)
550 dev->ops->ptp_info_update(roc_nix, msg->ptp_en);
552 /* Forward the same message as received from AF to VF */
553 pf_vf_mbox_send_up_msg(dev, msg);
555 /* VF gets PTP notification */
556 if (dev->ops && dev->ops->ptp_info_update)
557 dev->ops->ptp_info_update(roc_nix, msg->ptp_en);
/* Dispatch one UP request to its handler via the MBOX_UP_CGX_MESSAGES
 * M() expansion: each entry allocates a typed response in mbox_up,
 * stamps signature/pcifunc, and invokes mbox_up_handler_<name>().
 */
565 mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
567 /* Validate the signature; reply with an "invalid message" if bad */
568 if (req->sig != MBOX_REQ_SIG)
573 reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
/* Per-message-id case body; do not add comments inside — the macro is
 * backslash-continued.
 */
575 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
577 struct _rsp_type *rsp; \
579 rsp = (struct _rsp_type *)mbox_alloc_msg( \
580 &dev->mbox_up, 0, sizeof(struct _rsp_type)); \
584 rsp->hdr.sig = MBOX_RSP_SIG; \
585 rsp->hdr.pcifunc = dev->pf_func; \
587 err = mbox_up_handler_##_fn_name(dev, (struct _req_type *)req, \
/* Walk all pending UP messages on @mbox, dispatch each through
 * mbox_process_msgs_up(), then send the accumulated responses.
 */
599 process_msgs_up(struct dev *dev, struct mbox *mbox)
601 struct mbox_dev *mdev = &mbox->dev[0];
602 struct mbox_hdr *req_hdr;
603 struct mbox_msghdr *msg;
606 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
607 if (req_hdr->num_msgs == 0)
610 offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
611 for (i = 0; i < req_hdr->num_msgs; i++) {
612 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
614 plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
615 mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
616 dev_get_vf(msg->pcifunc));
617 err = mbox_process_msgs_up(dev, msg);
/* Handler errors are logged but do not stop the walk. */
619 plt_err("Error %d handling 0x%x (%s)", err, msg->id,
620 mbox_id2name(msg->id));
621 offset = mbox->rx_start + msg->next_msgoff;
623 /* Send mbox responses */
624 if (mdev->num_msgs) {
625 plt_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
626 mbox_msg_send(mbox, 0);
/* PF->VF mbox IRQ handler (VF side): ack RVU_VF_INT (W1C), then drain
 * configuration responses followed by UP messages.
 */
631 roc_pf_vf_mbox_irq(void *param)
633 struct dev *dev = param;
636 intr = plt_read64(dev->bar2 + RVU_VF_INT);
638 plt_base_dbg("Proceeding to check mbox UP messages if any");
640 plt_write64(intr, dev->bar2 + RVU_VF_INT);
641 plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
643 /* First process all configuration messages */
644 process_msgs(dev, dev->mbox);
646 /* Process Uplink messages */
647 process_msgs_up(dev, &dev->mbox_up);
/* AF->PF mbox IRQ handler (PF side): ack RVU_PF_INT (W1C), then drain
 * configuration responses followed by UP messages — mirrors
 * roc_pf_vf_mbox_irq with the PF register set.
 */
651 roc_af_pf_mbox_irq(void *param)
653 struct dev *dev = param;
656 intr = plt_read64(dev->bar2 + RVU_PF_INT);
658 plt_base_dbg("Proceeding to check mbox UP messages if any");
660 plt_write64(intr, dev->bar2 + RVU_PF_INT);
661 plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
663 /* First process all configuration messages */
664 process_msgs(dev, dev->mbox);
666 /* Process Uplink messages */
667 process_msgs_up(dev, &dev->mbox_up);
/* Register PF-side mbox interrupts: mask everything first, hook the
 * two VFPF vectors (VF0-63, VF64-127) and the AFPF vector, then clear
 * stale status and unmask. Error-return lines are elided from this
 * chunk.
 */
671 mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
673 struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
/* Mask all VFPF mbox interrupts while handlers are installed. */
677 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
679 dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
681 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
685 /* MBOX interrupt for VF(0...63) <-> PF */
686 rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
687 RVU_PF_INT_VEC_VFPF_MBOX0);
690 plt_err("Fail to register PF(VF0-63) mbox irq");
693 /* MBOX interrupt for VF(64...128) <-> PF */
694 rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
695 RVU_PF_INT_VEC_VFPF_MBOX1);
698 plt_err("Fail to register PF(VF64-128) mbox irq");
701 /* MBOX interrupt AF <-> PF */
702 rc = dev_irq_register(intr_handle, roc_af_pf_mbox_irq, dev,
703 RVU_PF_INT_VEC_AFPF_MBOX);
705 plt_err("Fail to register AF<->PF mbox irq");
/* Unmask VFPF mbox interrupts and the AFPF line (clear status first). */
710 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
712 dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));
714 plt_write64(~0ull, dev->bar2 + RVU_PF_INT);
715 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
/* Register VF-side mbox interrupt: mask, hook RVU_VF_INT_VEC_MBOX,
 * clear stale status, unmask.
 */
721 mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
723 struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
727 plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
729 /* MBOX interrupt PF <-> VF */
730 rc = dev_irq_register(intr_handle, roc_pf_vf_mbox_irq, dev,
731 RVU_VF_INT_VEC_MBOX);
733 plt_err("Fail to register PF<->VF mbox irq");
/* Clear any pending status, then enable. */
738 plt_write64(~0ull, dev->bar2 + RVU_VF_INT);
739 plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);
/* Dispatch mbox IRQ registration to the VF or PF variant based on
 * dev_is_vf() (the condition line is elided from this chunk).
 */
745 mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev)
748 return mbox_register_vf_irq(pci_dev, dev);
750 return mbox_register_pf_irq(pci_dev, dev);
/* Tear down PF-side mbox interrupts: mask all sources, cancel the
 * deferred-processing alarm, then unregister the three vectors.
 */
754 mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
756 struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
760 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
762 dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
764 plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
/* Stop the VF-message alarm armed by roc_vf_pf_mbox_irq. */
768 plt_alarm_cancel(roc_vf_pf_mbox_handle_msg, dev);
770 /* Unregister the interrupt handler for each vectors */
771 /* MBOX interrupt for VF(0...63) <-> PF */
772 dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
773 RVU_PF_INT_VEC_VFPF_MBOX0);
775 /* MBOX interrupt for VF(64...128) <-> PF */
776 dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
777 RVU_PF_INT_VEC_VFPF_MBOX1);
779 /* MBOX interrupt AF <-> PF */
780 dev_irq_unregister(intr_handle, roc_af_pf_mbox_irq, dev,
781 RVU_PF_INT_VEC_AFPF_MBOX);
/* Tear down VF-side mbox interrupt: mask, then unregister the vector. */
785 mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
787 struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
790 plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
792 /* Unregister the interrupt handler */
793 dev_irq_unregister(intr_handle, roc_pf_vf_mbox_irq, dev,
794 RVU_VF_INT_VEC_MBOX);
/* Dispatch mbox IRQ teardown to the VF or PF variant (dev_is_vf()
 * condition line elided from this chunk).
 */
798 mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev)
801 mbox_unregister_vf_irq(pci_dev, dev);
803 mbox_unregister_pf_irq(pci_dev, dev);
/* Inform AF that @vf has undergone FLR: send a vf_flr request with the
 * pcifunc rewritten to the VF, waiting synchronously since this runs
 * from interrupt context.
 */
807 vf_flr_send_msg(struct dev *dev, uint16_t vf)
809 struct mbox *mbox = dev->mbox;
813 req = mbox_alloc_msg_vf_flr(mbox);
816 /* Overwrite pcifunc to indicate VF */
817 req->hdr.pcifunc = dev_pf_func(dev->pf, vf);
819 /* Sync message in interrupt context */
820 rc = pf_af_sync_msg(dev, NULL);
822 plt_err("Failed to send VF FLR mbox msg, rc=%d", rc);
/* VF FLR IRQ handler (PF side): for each pending FLR bit — clear and
 * temporarily mask it, notify AF via vf_flr_send_msg(), write
 * VFTRPENDX to signal FLR completion to hardware, then re-enable the
 * interrupt bank.
 */
828 roc_pf_vf_flr_irq(void *param)
830 struct dev *dev = (struct dev *)param;
831 uint16_t max_vf = 64, vf;
836 max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
839 plt_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);
841 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
842 intr = plt_read64(bar2 + RVU_PF_VFFLR_INTX(i));
846 for (vf = 0; vf < max_vf; vf++) {
847 if (!(intr & (1ULL << vf)))
850 plt_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d", i,
851 intr, (64 * i + vf));
852 /* Clear interrupt */
853 plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
854 /* Disable the interrupt */
855 plt_write64(BIT_ULL(vf),
856 bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
857 /* Inform AF about VF reset */
858 vf_flr_send_msg(dev, vf);
860 /* Signal FLR finish */
861 plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
862 /* Enable interrupt */
863 plt_write64(~0ull, bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
/* Unregister VF FLR interrupts: mask both enable banks, then remove
 * the VFFLR0/VFFLR1 handlers.
 */
869 vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
871 struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
874 plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);
/* Disable HW interrupt for both 64-VF banks. */
877 for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
878 plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
880 dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
881 RVU_PF_INT_VEC_VFFLR0);
883 dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
884 RVU_PF_INT_VEC_VFFLR1);
/* Register VF FLR interrupts on vectors VFFLR0/VFFLR1, then clear any
 * stale status/pending state and enable both banks.
 */
890 vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
892 struct plt_intr_handle *handle = pci_dev->intr_handle;
895 plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);
897 rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
898 RVU_PF_INT_VEC_VFFLR0);
900 plt_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);
902 rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
903 RVU_PF_INT_VEC_VFFLR1);
905 plt_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);
907 /* Enable HW interrupt */
908 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
909 plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
910 plt_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
911 plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
/* Clear any RVUM interrupt status left over from a previous run:
 * VF path clears RVU_VF_INT; PF path clears the AFPF line plus every
 * per-bank VFPF mbox and VF FLR status register (all W1C).
 */
917 clear_rvum_interrupts(struct dev *dev)
922 if (dev_is_vf(dev)) {
923 /* Clear VF mbox interrupt */
924 intr = plt_read64(dev->bar2 + RVU_VF_INT);
926 plt_write64(intr, dev->bar2 + RVU_VF_INT);
928 /* Clear AF PF interrupt line */
929 intr = plt_read64(dev->bar2 + RVU_PF_INT);
931 plt_write64(intr, dev->bar2 + RVU_PF_INT);
932 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
933 /* Clear MBOX interrupts */
934 intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(i));
938 RVU_PF_VFPF_MBOX_INTX(i));
939 /* Clear VF FLR interrupts */
940 intr = plt_read64(dev->bar2 + RVU_PF_VFFLR_INTX(i));
943 dev->bar2 + RVU_PF_VFFLR_INTX(i));
/* Count the active VFs by popcount over the active_vfs bitmap.
 * NOTE(review): __builtin_popcount takes unsigned int (32-bit); the
 * bitmap is manipulated with BIT_ULL elsewhere, which suggests 64-bit
 * elements — if so, the upper 32 bits are silently ignored here and
 * __builtin_popcountll is intended. Confirm active_vfs element type.
 */
949 dev_active_vfs(struct dev *dev)
953 for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
954 count += __builtin_popcount(dev->active_vfs[i]);
/* Set DEV_HWCAP_F_VF when the PCI device ID is any of the known RVU VF
 * variants; PF IDs fall through untouched (break lines elided).
 */
960 dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
962 switch (pci_dev->id.device_id) {
963 case PCI_DEVID_CNXK_RVU_PF:
965 case PCI_DEVID_CNXK_RVU_SSO_TIM_VF:
966 case PCI_DEVID_CNXK_RVU_NPA_VF:
967 case PCI_DEVID_CN10K_RVU_CPT_VF:
968 case PCI_DEVID_CN9K_RVU_CPT_VF:
969 case PCI_DEVID_CNXK_RVU_AF_VF:
970 case PCI_DEVID_CNXK_RVU_VF:
971 case PCI_DEVID_CNXK_RVU_SDP_VF:
972 dev->hwcap |= DEV_HWCAP_F_VF;
/* Return the base address of the per-VF mbox region. On CN10K+ it is
 * simply BAR4 + MBOX_SIZE; on CN9K the physical address is read from
 * RVU_PF_VF_BAR4_ADDR and mapped via /dev/mem for all max_vfs.
 * NOTE(review): "%lx" for a uint64_t pa is not portable — PRIx64 is
 * used elsewhere in this file; consider matching.
 */
978 dev_vf_mbase_get(struct plt_pci_device *pci_dev, struct dev *dev)
980 void *vf_mbase = NULL;
986 /* For CN10K onwards, it is just after PF MBOX */
987 if (!roc_model_is_cn9k())
988 return dev->bar4 + MBOX_SIZE;
990 pa = plt_read64(dev->bar2 + RVU_PF_VF_BAR4_ADDR);
992 plt_err("Invalid VF mbox base pa");
996 vf_mbase = mbox_mem_map(pa, MBOX_SIZE * pci_dev->max_vfs);
997 if (vf_mbase == MAP_FAILED) {
998 plt_err("Failed to mmap vf mbase at pa 0x%lx, rc=%d", pa,
1002 return (uintptr_t)vf_mbase;
/* Release the VF mbox mapping; a no-op on CN10K+ (where the region is
 * part of BAR4 and was never mmap'd) or when there are no VFs.
 */
1006 dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase)
1008 if (!vf_mbase || !pci_dev->max_vfs || !roc_model_is_cn9k())
1011 mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
/* Ask AF (via lmtst_tbl_setup) to configure this pf_func's LMT table:
 * either point it at a caller-provided IOVA (@valid_iova true) or
 * share the primary pf_func's LMT region.
 */
1015 dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
1017 struct lmtst_tbl_setup_req *req;
1019 req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
1023 /* This pcifunc is defined with primary pcifunc whose LMT address
1024 * will be shared. If call contains valid IOVA, following pcifunc
1025 * field is of no use.
1027 req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get();
1028 req->use_local_lmt_region = valid_iova;
1029 req->lmt_iova = iova;
1031 return mbox_process(mbox);
1034 /* Total no of lines * size of each lmtline */
1035 #define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ)
/* Establish this device's LMT (large-atomic store) line base:
 * - CN9K: fixed BAR2 offset, nothing to allocate.
 * - CN10K secondary pf_funcs (shared mode): reuse the primary's base
 *   via an AF request, falling back to a private region on failure.
 * - Otherwise: reserve an aligned memzone, share its IOVA with the
 *   kernel, and possibly become the primary for shared mode.
 */
1037 dev_lmt_setup(struct dev *dev)
1039 char name[PLT_MEMZONE_NAMESIZE];
1040 const struct plt_memzone *mz;
1041 struct idev_cfg *idev;
1044 if (roc_model_is_cn9k()) {
1045 dev->lmt_base = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
1051 /* Set common lmt region from second pf_func onwards. */
1052 if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
1053 dev->pf_func != idev_lmt_pffunc_get()) {
1054 rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
1056 /* On success, updating lmt base of secondary pf_funcs
1057 * with primary pf_func's lmt base.
1059 dev->lmt_base = roc_idev_lmt_base_addr_get();
/* Shared-setup failure is non-fatal: fall through to a private region. */
1062 plt_err("Failed to setup shared lmt region, pf_func %d err %d "
1063 "Using respective LMT region per pf func",
1067 /* Allocating memory for LMT region */
1068 sprintf(name, "LMT_MAP%x", dev->pf_func);
1070 /* Setting alignment to ensure correct masking for resetting to lmt base
1071 * of a core after all lmt lines under that core are used.
1072 * Alignment value LMT_REGION_SIZE to handle the case where all lines
1073 * are used by 1 core.
1075 mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE,
1078 plt_err("Memory alloc failed: %s", strerror(errno));
1082 /* Share the IOVA address with Kernel */
1083 rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
1089 dev->lmt_base = mz->iova;
1091 /* Base LMT address should be chosen from only those pci funcs which
1092 * participate in LMT shared mode.
1094 if (!dev->disable_shared_lmt) {
1095 idev = idev_get_cfg();
/* First participant becomes the primary whose base others will share. */
1101 if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
1102 idev->lmt_base_addr = dev->lmt_base;
1103 idev->lmt_pf_func = dev->pf_func;
1104 idev->num_lmtlines = RVU_LMT_LINE_MAX;
/* Error path: release the memzone (label lines elided). */
1110 plt_memzone_free(mz);
/* Sanity-check the build-time cache line size against the running
 * silicon: CN9K requires 128 bytes; CN10K expects 64 (128 is only a
 * performance warning, any other value an error).
 */
1116 dev_cache_line_size_valid(void)
1118 if (roc_model_is_cn9k()) {
1119 if (PLT_CACHE_LINE_SIZE != 128) {
1120 plt_err("Cache line size of %d is wrong for CN9K",
1121 PLT_CACHE_LINE_SIZE);
1124 } else if (roc_model_is_cn10k()) {
1125 if (PLT_CACHE_LINE_SIZE == 128) {
1126 plt_warn("Cache line size of %d might affect performance",
1127 PLT_CACHE_LINE_SIZE);
1128 } else if (PLT_CACHE_LINE_SIZE != 64) {
1129 plt_err("Cache line size of %d is wrong for CN10K",
1130 PLT_CACHE_LINE_SIZE);
/* One-time device bring-up: validate cache line size and PCI BARs,
 * pick PF-vs-VF mbox geometry, initialize the local and UP mboxes,
 * register mbox IRQs, perform the READY handshake to learn pf_func,
 * set up per-VF mboxes when SR-IOV VFs exist, register VF-FLR IRQs
 * (PF only), init the NPA LF, and establish the LMT base. Error
 * labels/returns are elided from this chunk.
 */
1139 dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
1141 int direction, up_direction, rc;
1142 uintptr_t bar2, bar4, mbox;
1143 uintptr_t vf_mbase = 0;
1144 uint64_t intr_offset;
1146 if (!dev_cache_line_size_valid())
1149 bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
1150 bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
1151 if (bar2 == 0 || bar4 == 0) {
1152 plt_err("Failed to get PCI bars");
1157 /* Trigger fault on bar2 and bar4 regions
1158 * to avoid BUG_ON in remap_pfn_range()
1161 *(volatile uint64_t *)bar2;
1162 *(volatile uint64_t *)bar4;
1164 /* Check ROC model supported */
1165 if (roc_model->flag == 0) {
1166 rc = UTIL_ERR_INVALID_MODEL;
1170 dev->maxvf = pci_dev->max_vfs;
1173 dev_vf_hwcap_update(pci_dev, dev);
/* VF and PF use different mbox regions, directions, and INT registers. */
1175 if (dev_is_vf(dev)) {
1176 mbox = (roc_model_is_cn9k() ?
1177 bar4 : (bar2 + RVU_VF_MBOX_REGION));
1178 direction = MBOX_DIR_VFPF;
1179 up_direction = MBOX_DIR_VFPF_UP;
1180 intr_offset = RVU_VF_INT;
1183 direction = MBOX_DIR_PFAF;
1184 up_direction = MBOX_DIR_PFAF_UP;
1185 intr_offset = RVU_PF_INT;
1188 /* Clear all RVUM interrupts */
1189 clear_rvum_interrupts(dev);
1191 /* Initialize the local mbox */
1192 rc = mbox_init(&dev->mbox_local, mbox, bar2, direction, 1, intr_offset);
1195 dev->mbox = &dev->mbox_local;
1197 rc = mbox_init(&dev->mbox_up, mbox, bar2, up_direction, 1, intr_offset);
1201 /* Register mbox interrupts */
1202 rc = mbox_register_irq(pci_dev, dev);
1206 /* Check the readiness of PF/VF */
1207 rc = send_ready_msg(dev->mbox, &dev->pf_func);
1209 goto mbox_unregister;
1211 dev->pf = dev_get_pf(dev->pf_func);
1212 dev->vf = dev_get_vf(dev->pf_func);
1213 memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));
1215 /* Allocate memory for device ops */
1216 dev->ops = plt_zmalloc(sizeof(struct dev_ops), 0);
1217 if (dev->ops == NULL) {
1219 goto mbox_unregister;
1222 /* Found VF devices in a PF device */
1223 if (pci_dev->max_vfs > 0) {
1224 /* Remap mbox area for all vf's */
1225 vf_mbase = dev_vf_mbase_get(pci_dev, dev);
1228 goto mbox_unregister;
1230 /* Init mbox object */
1231 rc = mbox_init(&dev->mbox_vfpf, vf_mbase, bar2, MBOX_DIR_PFVF,
1232 pci_dev->max_vfs, intr_offset);
1236 /* PF -> VF UP messages */
1237 rc = mbox_init(&dev->mbox_vfpf_up, vf_mbase, bar2,
1238 MBOX_DIR_PFVF_UP, pci_dev->max_vfs, intr_offset);
1243 /* Register VF-FLR irq handlers */
1244 if (!dev_is_vf(dev)) {
1245 rc = vf_flr_register_irqs(pci_dev, dev);
1249 dev->mbox_active = 1;
1251 rc = npa_lf_init(dev, pci_dev);
1255 /* Setup LMT line base */
1256 rc = dev_lmt_setup(dev);
/* Error-unwind labels (partially elided): undo in reverse order. */
1262 dev_vf_mbase_put(pci_dev, vf_mbase);
1264 mbox_unregister_irq(pci_dev, dev);
1268 mbox_fini(dev->mbox);
1269 mbox_fini(&dev->mbox_up);
/* Device teardown (continues past the visible chunk): bail while other
 * refs still use this dev's NPA LF, free the LMT memzone, unregister
 * mbox and (PF-only) FLR IRQs, release the PF<->VF and PF<->AF mboxes,
 * and disable MSI-X vectors.
 */
1275 dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
1277 struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
1280 /* Check if this dev hosts npalf and has 1+ refs */
1281 if (idev_npa_lf_active(dev) > 1)
1284 /* Clear references to this pci dev */
1287 /* Releasing memory allocated for lmt region */
1289 plt_memzone_free(dev->lmt_mz)
1291 mbox_unregister_irq(pci_dev, dev);
1293 if (!dev_is_vf(dev))
1294 vf_flr_unregister_irqs(pci_dev, dev);
1295 /* Release PF - VF */
1296 mbox = &dev->mbox_vfpf;
1297 if (mbox->hwbase && mbox->dev)
1298 dev_vf_mbase_put(pci_dev, mbox->hwbase);
/* Also release the PF->VF UP mbox. */
1304 mbox = &dev->mbox_vfpf_up;
1307 /* Release PF - AF */
1310 mbox = &dev->mbox_up;
1312 dev->mbox_active = 0;
1314 /* Disable MSIX vectors */
1315 dev_irqs_disable(intr_handle);