/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_memcpy.h>
#include <rte_eal_paging.h>

#include "otx2_dev.h"
#include "otx2_mbox.h"
#define RVU_MAX_VF		64 /* RVU_PF_VFPF_MBOX_INT(0..1) */
#define RVU_MAX_INT_RETRY	3

/* PF/VF message handling timer, in microseconds for rte_eal_alarm_set() (20 ms) */
#define VF_PF_MBOX_TIMER_MS	(20 * 1000)
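
/*
 * RVU mailbox topology handled in this file:
 *
 * - dev->mbox / dev->mbox_up: the device's own channel, i.e. PF <-> AF on a
 *   PF or VF <-> PF on a VF (BAR4 mailbox region, interrupt via RVU_PF_INT
 *   or RVU_VF_INT respectively).
 * - dev->mbox_vfpf / dev->mbox_vfpf_up: the PF-side view of its VFs'
 *   mailboxes (VF BAR4 area remapped via RVU_PF_VF_BAR4_ADDR, interrupts via
 *   RVU_PF_VFPF_MBOX_INT(0..1)).
 *
 * Requests arriving from VFs are either answered locally by the PF
 * (e.g. MBOX_MSG_READY) or routed to the AF, and the AF responses are copied
 * back into the requesting VF's mailbox.
 *
 * Illustrative call order from a PCI probe/remove path (the caller shown here
 * is hypothetical; only the two entry points are defined in this file):
 *
 *	rc = otx2_dev_priv_init(pci_dev, dev);	// map BARs, init mailboxes,
 *						// register mbox/FLR IRQs
 *	...
 *	otx2_dev_fini(pci_dev, dev);		// teardown on device remove
 */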
mbox_mem_map(off_t off, size_t size)
	void *va = MAP_FAILED;

	mem_fd = open("/dev/mem", O_RDWR);

	va = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
			 RTE_MAP_SHARED, mem_fd, off);

		otx2_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd",
			 size, mem_fd, (intmax_t)off);
mbox_mem_unmap(void *va, size_t size)
	rte_mem_unmap(va, size);
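
/* Send a message to the AF and busy-poll RVU_PF_INT for the response.
 * Used when the caller cannot rely on the AF<->PF mailbox interrupt
 * (e.g. vf_flr_send_msg() runs in interrupt context).
 */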
pf_af_sync_msg(struct otx2_dev *dev, struct mbox_msghdr **rsp)
	uint32_t timeout = 0, sleep = 1;
	struct otx2_mbox *mbox = dev->mbox;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_msghdr *msghdr;

	/* Disable PF interrupts; we are called from interrupt context and
	 * poll RVU_PF_INT for the AF response instead of waiting for the
	 * mbox interrupt.
	 */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	otx2_mbox_msg_send(mbox, 0);

		if (timeout >= MBOX_RSP_TIMEOUT) {
			otx2_err("Message timeout: %dms", MBOX_RSP_TIMEOUT);

		int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	otx2_write64(int_status, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	off = mbox->rx_start +
	      RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
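
/* Wait for the AF to answer the requests previously routed on behalf of a VF,
 * then copy each response from the AF<->PF mailbox into that VF's PF<->VF
 * mailbox. Returns the number of responses found.
 */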
af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)
	uint32_t timeout = 0, sleep = 1;
	struct otx2_mbox *mbox = dev->mbox;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_msghdr *rsp;

	/* Disable PF interrupts; we are in the VF message handling timer/alarm
	 * context and poll RVU_PF_INT for the AF response instead of waiting
	 * for the mbox interrupt.
	 */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	otx2_mbox_msg_send(mbox, 0);

		if (timeout >= MBOX_RSP_TIMEOUT) {
			otx2_err("Routed messages %d timeout: %dms",
				 num_msg, MBOX_RSP_TIMEOUT);

		int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	rte_spinlock_lock(&mdev->mbox_lock);

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs != num_msg)
		otx2_err("Routed messages: %d received: %d", num_msg,
			 req_hdr->num_msgs);

	/* Get messages from mbox */
	offset = mbox->rx_start +
		 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Reserve PF/VF mbox message */
		size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
		rsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
		otx2_mbox_rsp_init(msg->id, rsp);

		/* Copy message from AF<->PF mbox to PF<->VF mbox */
		otx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
				 (uint8_t *)msg + sizeof(struct mbox_msghdr),
				 size - sizeof(struct mbox_msghdr));

		/* Set status and sender pf_func data */
		rsp->pcifunc = msg->pcifunc;

		offset = mbox->rx_start + msg->next_msgoff;

	rte_spinlock_unlock(&mdev->mbox_lock);

	return req_hdr->num_msgs;
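
/* Drain pending requests from one VF's mailbox: answer MBOX_MSG_READY locally
 * (marking the VF active), route everything else to the AF, then push the
 * collected responses back to the VF.
 */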
vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)
	int offset, routed = 0;
	struct otx2_mbox *mbox = &dev->mbox_vfpf;
	struct otx2_mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (!req_hdr->num_msgs)
		return;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		msg->pcifunc = otx2_pfvf_func(dev->pf, vf);

		if (msg->id == MBOX_MSG_READY) {
			struct ready_msg_rsp *rsp;
			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

			/* Handle READY message in PF */
			dev->active_vfs[vf / max_bits] |=
						BIT_ULL(vf % max_bits);
			rsp = (struct ready_msg_rsp *)
			      otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp));
			otx2_mbox_rsp_init(msg->id, rsp);

			/* PF/VF function ID */
			rsp->hdr.pcifunc = msg->pcifunc;

			struct mbox_msghdr *af_req;
			/* Reserve AF/PF mbox message */
			size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
			af_req = otx2_mbox_alloc_msg(dev->mbox, 0, size);
			otx2_mbox_req_init(msg->id, af_req);

			/* Copy message from VF<->PF mbox to PF<->AF mbox */
			otx2_mbox_memcpy((uint8_t *)af_req +
					 sizeof(struct mbox_msghdr),
					 (uint8_t *)msg +
					 sizeof(struct mbox_msghdr),
					 size - sizeof(struct mbox_msghdr));
			af_req->pcifunc = msg->pcifunc;

		offset = mbox->rx_start + msg->next_msgoff;

	otx2_base_dbg("pf:%d routed %d messages from vf:%d to AF",
		      dev->pf, routed, vf);
	af_pf_wait_msg(dev, vf, routed);
	otx2_mbox_reset(dev->mbox, 0);

	/* Send mbox responses to VF */
	if (mdev->num_msgs) {
		otx2_base_dbg("pf:%d reply %d messages to vf:%d",
			      dev->pf, mdev->num_msgs, vf);
		otx2_mbox_msg_send(mbox, vf);
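
/* Handle UP-direction messages coming back from a VF (e.g. acks for the CGX
 * link/PTP notifications forwarded by the PF); they are only logged and
 * counted as acked here.
 */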
vf_pf_process_up_msgs(struct otx2_dev *dev, uint16_t vf)
	struct otx2_mbox *mbox = &dev->mbox_vfpf_up;
	struct otx2_mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msg->pcifunc = otx2_pfvf_func(dev->pf, vf);

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				      msg->id, otx2_mbox_id2name(msg->id),
				      msg->pcifunc, otx2_get_pf(msg->pcifunc),
				      otx2_get_vf(msg->pcifunc));
			break;
		case MBOX_MSG_CGX_PTP_RX_INFO:
			otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				      msg->id, otx2_mbox_id2name(msg->id),
				      msg->pcifunc, otx2_get_pf(msg->pcifunc),
				      otx2_get_vf(msg->pcifunc));
			break;
		default:
			otx2_err("Not handled UP msg 0x%x (%s) func:0x%x",
				 msg->id, otx2_mbox_id2name(msg->id),
				 msg->pcifunc);
		}
		offset = mbox->rx_start + msg->next_msgoff;

	otx2_mbox_reset(mbox, vf);
	mdev->msgs_acked = msgs_acked;
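
/* Alarm callback scheduled by otx2_vf_pf_mbox_irq(): walk the interrupt bits
 * latched in dev->intr.bits and service each requesting VF outside of the
 * interrupt handler itself.
 */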
otx2_vf_pf_mbox_handle_msg(void *param)
	uint16_t vf, max_vf, max_bits;
	struct otx2_dev *dev = param;

	max_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (dev->intr.bits[vf/max_bits] & BIT_ULL(vf%max_bits)) {
			otx2_base_dbg("Process vf:%d request (pf:%d, vf:%d)",
				      vf, dev->pf, dev->vf);
			vf_pf_process_msgs(dev, vf);
			vf_pf_process_up_msgs(dev, vf);
			dev->intr.bits[vf/max_bits] &= ~(BIT_ULL(vf%max_bits));
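
/* VF->PF mailbox interrupt handler: latch the per-VF interrupt bits, clear
 * the hardware interrupt and defer the actual message processing to an alarm.
 */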
otx2_vf_pf_mbox_irq(void *param)
	struct otx2_dev *dev = param;
	bool alarm_set = false;

	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
		intr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));

		otx2_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
			      vfpf, intr, dev->pf, dev->vf);

		/* Save and clear intr bits */
		dev->intr.bits[vfpf] |= intr;
		otx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));

	if (!dev->timer_set && alarm_set) {
		/* Start timer to handle messages */
		rte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,
				  otx2_vf_pf_mbox_handle_msg, dev);
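
/* Process responses received on the local mailbox (AF->PF on a PF, PF->VF on
 * a VF): record our pf_func from the READY response and bump msgs_acked so
 * that synchronous senders can make progress.
 */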
otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
			      msg->id, otx2_mbox_id2name(msg->id),
			      otx2_get_pf(msg->pcifunc),
			      otx2_get_vf(msg->pcifunc));

		/* Add message IDs that are handled here */
			/* Get our identity */
			dev->pf_func = msg->pcifunc;

			otx2_err("Message (%s) response has err=%d",
				 otx2_mbox_id2name(msg->id), msg->rc);

		offset = mbox->rx_start + msg->next_msgoff;

	otx2_mbox_reset(mbox, 0);
	/* Update acked if someone is waiting for a message */
	mdev->msgs_acked = msgs_acked;
/* Copies the message received from AF and sends it to VF */
pf_vf_mbox_send_up_msg(struct otx2_dev *dev, void *rec_msg)
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * sizeof(uint64_t);
	struct otx2_mbox *vf_mbox = &dev->mbox_vfpf_up;
	struct msg_req *msg = rec_msg;
	struct mbox_msghdr *vf_msg;

	size = RTE_ALIGN(otx2_mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
	/* Send UP message to all VFs */
	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
		if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
			continue;

		otx2_base_dbg("(%s) size: %zx to VF: %d",
			      otx2_mbox_id2name(msg->hdr.id), size, vf);

		/* Reserve PF/VF mbox message */
		vf_msg = otx2_mbox_alloc_msg(vf_mbox, vf, size);
			otx2_err("Failed to alloc VF%d UP message", vf);
		otx2_mbox_req_init(msg->hdr.id, vf_msg);

		/* Copy message from AF<->PF UP mbox to PF<->VF UP mbox */
		otx2_mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
				 (uint8_t *)msg + sizeof(struct mbox_msghdr),
				 size - sizeof(struct mbox_msghdr));

		vf_msg->rc = msg->hdr.rc;
		/* Set PF to be a sender */
		vf_msg->pcifunc = dev->pf_func;

		otx2_mbox_msg_send(vf_mbox, vf);
otx2_mbox_up_handler_cgx_link_event(struct otx2_dev *dev,
				    struct cgx_link_info_msg *msg,
	struct cgx_link_user_info *linfo = &msg->link_info;

	otx2_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
		      otx2_get_pf(dev->pf_func), otx2_get_vf(dev->pf_func),
		      linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
		      otx2_mbox_id2name(msg->hdr.id),
		      otx2_get_pf(msg->hdr.pcifunc),
		      otx2_get_vf(msg->hdr.pcifunc));

	/* PF gets link notification from AF */
	if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(dev, linfo);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);

	/* VF gets link up notification */
	if (dev->ops && dev->ops->link_status_update)
		dev->ops->link_status_update(dev, linfo);
otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_dev *dev,
				     struct cgx_ptp_rx_info_msg *msg,
	otx2_nix_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     otx2_get_pf(dev->pf_func),
		     otx2_get_vf(dev->pf_func),
		     msg->ptp_en ? "ENABLED" : "DISABLED",
		     msg->hdr.id, otx2_mbox_id2name(msg->hdr.id),
		     otx2_get_pf(msg->hdr.pcifunc),
		     otx2_get_vf(msg->hdr.pcifunc));

	/* PF gets PTP notification from AF */
	if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(dev, msg->ptp_en);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);

	/* VF gets PTP notification */
	if (dev->ops && dev->ops->ptp_info_update)
		dev->ops->ptp_info_update(dev, msg->ptp_en);
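
/* Dispatch a single UP message to its otx2_mbox_up_handler_<fn>() callback.
 * The M() macro below is expanded once per supported UP message ID to
 * allocate a response of the right type, fill the common header and invoke
 * the handler; requests with a bad signature or an unknown ID are answered
 * with otx2_reply_invalid_msg().
 */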
mbox_process_msgs_up(struct otx2_dev *dev, struct mbox_msghdr *req)
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)

#define M(_name, _id, _fn_name, _req_type, _rsp_type)		\
		struct _rsp_type *rsp;				\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(	\
			sizeof(struct _rsp_type));		\
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
		rsp->hdr.pcifunc = dev->pf_func;		\
		err = otx2_mbox_up_handler_ ## _fn_name(	\
			dev, (struct _req_type *)req, rsp);	\

	otx2_reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
otx2_process_msgs_up(struct otx2_dev *dev, struct otx2_mbox *mbox)
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
			      msg->id, otx2_mbox_id2name(msg->id),
			      otx2_get_pf(msg->pcifunc),
			      otx2_get_vf(msg->pcifunc));
		err = mbox_process_msgs_up(dev, msg);
			otx2_err("Error %d handling 0x%x (%s)",
				 err, msg->id, otx2_mbox_id2name(msg->id));
		offset = mbox->rx_start + msg->next_msgoff;

	/* Send mbox responses */
	if (mdev->num_msgs) {
		otx2_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
		otx2_mbox_msg_send(mbox, 0);
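
/* VF mailbox interrupt handler (RVU_VF_INT): first handle configuration
 * responses from the PF, then any UP notifications it forwarded.
 */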
otx2_pf_vf_mbox_irq(void *param)
	struct otx2_dev *dev = param;

	intr = otx2_read64(dev->bar2 + RVU_VF_INT);
		otx2_base_dbg("Proceeding to check mbox UP messages if any");

	otx2_write64(intr, dev->bar2 + RVU_VF_INT);
	otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	otx2_process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	otx2_process_msgs_up(dev, &dev->mbox_up);
otx2_af_pf_mbox_irq(void *param)
	struct otx2_dev *dev = param;

	intr = otx2_read64(dev->bar2 + RVU_PF_INT);
		otx2_base_dbg("Proceeding to check mbox UP messages if any");

	otx2_write64(intr, dev->bar2 + RVU_PF_INT);
	otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	otx2_process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	otx2_process_msgs_up(dev, &dev->mbox_up);
mbox_register_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		otx2_write64(~0ull, dev->bar2 +
			     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* MBOX interrupt for VF(0...63) <-> PF */
	rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			       RVU_PF_INT_VEC_VFPF_MBOX0);

		otx2_err("Fail to register PF(VF0-63) mbox irq");

	/* MBOX interrupt for VF(64...128) <-> PF */
	rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			       RVU_PF_INT_VEC_VFPF_MBOX1);

		otx2_err("Fail to register PF(VF64-128) mbox irq");

	/* MBOX interrupt AF <-> PF */
	rc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,
			       dev, RVU_PF_INT_VEC_AFPF_MBOX);
		otx2_err("Fail to register AF<->PF mbox irq");

	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		otx2_write64(~0ull, dev->bar2 +
			     RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
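
/* A VF has a single mailbox vector (RVU_VF_INT_VEC_MBOX) towards its PF. */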
mbox_register_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* MBOX interrupt PF <-> VF */
	rc = otx2_register_irq(intr_handle, otx2_pf_vf_mbox_irq,
			       dev, RVU_VF_INT_VEC_MBOX);
		otx2_err("Fail to register PF<->VF mbox irq");

	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT);
	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);
mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
	if (otx2_dev_is_vf(dev))
		return mbox_register_vf_irq(pci_dev, dev);

	return mbox_register_pf_irq(pci_dev, dev);
mbox_unregister_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		otx2_write64(~0ull, dev->bar2 +
			     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	rte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev);

	/* Unregister the interrupt handler for each vector */
	/* MBOX interrupt for VF(0...63) <-> PF */
	otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			    RVU_PF_INT_VEC_VFPF_MBOX0);

	/* MBOX interrupt for VF(64...128) <-> PF */
	otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			    RVU_PF_INT_VEC_VFPF_MBOX1);

	/* MBOX interrupt AF <-> PF */
	otx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,
			    RVU_PF_INT_VEC_AFPF_MBOX);
mbox_unregister_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* Unregister the interrupt handler */
	otx2_unregister_irq(intr_handle, otx2_pf_vf_mbox_irq, dev,
			    RVU_VF_INT_VEC_MBOX);
mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
	if (otx2_dev_is_vf(dev))
		mbox_unregister_vf_irq(pci_dev, dev);
	else
		mbox_unregister_pf_irq(pci_dev, dev);
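
/* Tell the AF that the given VF went through FLR. Sent synchronously with
 * pf_af_sync_msg() because this is called from the FLR interrupt handler.
 */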
vf_flr_send_msg(struct otx2_dev *dev, uint16_t vf)
	struct otx2_mbox *mbox = dev->mbox;

	req = otx2_mbox_alloc_msg_vf_flr(mbox);
	/* Overwrite pcifunc to indicate VF */
	req->hdr.pcifunc = otx2_pfvf_func(dev->pf, vf);

	/* Sync message in interrupt context */
	rc = pf_af_sync_msg(dev, NULL);
		otx2_err("Failed to send VF FLR mbox msg, rc=%d", rc);
otx2_pf_vf_flr_irq(void *param)
	struct otx2_dev *dev = (struct otx2_dev *)param;
	uint16_t max_vf = 64, vf;

	max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;

	otx2_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);

	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		intr = otx2_read64(bar2 + RVU_PF_VFFLR_INTX(i));

		for (vf = 0; vf < max_vf; vf++) {
			if (!(intr & (1ULL << vf)))
				continue;

			otx2_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d",
				      i, intr, (64 * i + vf));
			/* Clear interrupt */
			otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
			/* Disable the interrupt */
			otx2_write64(BIT_ULL(vf),
				     bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
			/* Inform AF about VF reset */
			vf_flr_send_msg(dev, vf);

			/* Signal FLR finish */
			otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
			/* Enable interrupt */
			otx2_write64(BIT_ULL(vf),
				     bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
vf_flr_unregister_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	otx2_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);

	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

	otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
			    RVU_PF_INT_VEC_VFFLR0);

	otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
			    RVU_PF_INT_VEC_VFFLR1);
vf_flr_register_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
	struct rte_intr_handle *handle = &pci_dev->intr_handle;

	otx2_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);

	rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
			       RVU_PF_INT_VEC_VFFLR0);
		otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);

	rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
			       RVU_PF_INT_VEC_VFFLR1);
		otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);

	/* Enable HW interrupt */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
		otx2_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
		otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
/* Get number of active VFs for the given PF device. */
otx2_dev_active_vfs(void *otx2_dev)
	struct otx2_dev *dev = otx2_dev;

	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		count += __builtin_popcount(dev->active_vfs[i]);
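
/* Mark the device as a VF based on its PCI device ID. */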
otx2_update_vf_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
	switch (pci_dev->id.device_id) {
	case PCI_DEVID_OCTEONTX2_RVU_PF:
		break;
	case PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF:
	case PCI_DEVID_OCTEONTX2_RVU_NPA_VF:
	case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
	case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
	case PCI_DEVID_OCTEONTX2_RVU_VF:
	case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
		dev->hwcap |= OTX2_HWCAP_F_VF;
/* Initialize the otx2 device */
otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev)
	int up_direction = MBOX_DIR_PFAF_UP;
	int rc, direction = MBOX_DIR_PFAF;
	uint64_t intr_offset = RVU_PF_INT;
	struct otx2_dev *dev = otx2_dev;
	uintptr_t bar2, bar4;

	bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
	bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;

	if (bar2 == 0 || bar4 == 0) {
		otx2_err("Failed to get pci bars");

	dev->node = pci_dev->device.numa_node;
	dev->maxvf = pci_dev->max_vfs;

	otx2_update_vf_hwcap(pci_dev, dev);

	if (otx2_dev_is_vf(dev)) {
		direction = MBOX_DIR_VFPF;
		up_direction = MBOX_DIR_VFPF_UP;
		intr_offset = RVU_VF_INT;

	/* Initialize the local mbox */
	rc = otx2_mbox_init(&dev->mbox_local, bar4, bar2, direction, 1,
	dev->mbox = &dev->mbox_local;

	rc = otx2_mbox_init(&dev->mbox_up, bar4, bar2, up_direction, 1,

	/* Register mbox interrupts */
	rc = mbox_register_irq(pci_dev, dev);

	/* Check the readiness of PF/VF */
	rc = otx2_send_ready_msg(dev->mbox, &dev->pf_func);
		goto mbox_unregister;

	dev->pf = otx2_get_pf(dev->pf_func);
	dev->vf = otx2_get_vf(dev->pf_func);
	memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));

	/* VF devices attached to this PF device */
	if (pci_dev->max_vfs > 0) {
		/* Remap mbox area for all VFs */
		bar4_addr = otx2_read64(bar2 + RVU_PF_VF_BAR4_ADDR);
		if (bar4_addr == 0) {

		hwbase = mbox_mem_map(bar4_addr, MBOX_SIZE * pci_dev->max_vfs);
		if (hwbase == MAP_FAILED) {

		/* Init mbox object */
		rc = otx2_mbox_init(&dev->mbox_vfpf, (uintptr_t)hwbase,
				    bar2, MBOX_DIR_PFVF, pci_dev->max_vfs,

		/* PF -> VF UP messages */
		rc = otx2_mbox_init(&dev->mbox_vfpf_up, (uintptr_t)hwbase,
				    bar2, MBOX_DIR_PFVF_UP, pci_dev->max_vfs,

	/* Register VF-FLR irq handlers */
	if (otx2_dev_is_pf(dev)) {
		rc = vf_flr_register_irqs(pci_dev, dev);

	dev->mbox_active = 1;

	mbox_mem_unmap(hwbase, MBOX_SIZE * pci_dev->max_vfs);

	mbox_unregister_irq(pci_dev, dev);

	otx2_mbox_fini(dev->mbox);
	otx2_mbox_fini(&dev->mbox_up);
/* Finalize the otx2 device */
otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev)
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct otx2_dev *dev = otx2_dev;
	struct otx2_idev_cfg *idev;
	struct otx2_mbox *mbox;

	/* Clear references to this pci dev */
	idev = otx2_intra_dev_get_cfg();
	if (idev->npa_lf && idev->npa_lf->pci_dev == pci_dev)
		idev->npa_lf = NULL;

	mbox_unregister_irq(pci_dev, dev);

	if (otx2_dev_is_pf(dev))
		vf_flr_unregister_irqs(pci_dev, dev);
	/* Release PF - VF */
	mbox = &dev->mbox_vfpf;
	if (mbox->hwbase && mbox->dev)
		mbox_mem_unmap((void *)mbox->hwbase,
			       MBOX_SIZE * pci_dev->max_vfs);
	otx2_mbox_fini(mbox);
	mbox = &dev->mbox_vfpf_up;
	otx2_mbox_fini(mbox);

	/* Release PF - AF */
	mbox = dev->mbox;
	otx2_mbox_fini(mbox);
	mbox = &dev->mbox_up;
	otx2_mbox_fini(mbox);
	dev->mbox_active = 0;

	/* Disable MSIX vectors */
	otx2_disable_irqs(intr_handle);