/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_memcpy.h>

#include "otx2_mbox.h"

#define RVU_MAX_VF 64 /* RVU_PF_VFPF_MBOX_INT(0..1) */
#define RVU_MAX_INT_RETRY 3

/* PF/VF message handling timer, passed to rte_eal_alarm_set(),
 * which takes microseconds (i.e. 20 ms of real time).
 */
#define VF_PF_MBOX_TIMER_MS (20 * 1000)
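
/*
 * Mailbox handling overview (a summary of the code below, not an
 * authoritative spec):
 * - The AF<->PF and PF<->VF mailboxes are shared-memory regions with
 *   interrupt-based signalling (RVU_PF_INT, RVU_VF_INT and the
 *   RVU_PF_VFPF_MBOX_INTX registers in BAR2).
 * - A PF acts as a proxy for its VFs: requests read from the PF<->VF
 *   mailbox are forwarded to the AF, and the AF responses are copied
 *   back into the VF's mailbox region.
 * - The VF->PF mailbox IRQ handler only latches per-VF interrupt
 *   bits; the actual processing is deferred to an EAL alarm callback.
 */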
static void *
mbox_mem_map(off_t off, size_t size)
        void *va = MAP_FAILED;
        int mem_fd;

        mem_fd = open("/dev/mem", O_RDWR);

        va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, off);
        if (va == MAP_FAILED)
                otx2_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd",
                         size, mem_fd, (intmax_t)off);

static void
mbox_mem_unmap(void *va, size_t size)
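
/*
 * Send a message to the AF and wait synchronously for its response.
 * This runs in interrupt context, so PF interrupts are masked and
 * RVU_PF_INT is polled instead; the response is then read from the
 * head of the RX region.
 */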
static int
pf_af_sync_msg(struct otx2_dev *dev, struct mbox_msghdr **rsp)
        uint32_t timeout = 0, sleep = 1;
        struct otx2_mbox *mbox = dev->mbox;
        struct otx2_mbox_dev *mdev = &mbox->dev[0];
        volatile uint64_t int_status;
        struct mbox_msghdr *msghdr;

        /* Disable PF interrupts; we are called from interrupt context */
        otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

        otx2_mbox_msg_send(mbox, 0);

        do {
                if (timeout >= MBOX_RSP_TIMEOUT) {
                        otx2_err("Message timeout: %dms", MBOX_RSP_TIMEOUT);

                int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
        } while ((int_status & 0x1) != 0x1);

        /* Clear the interrupt status */
        otx2_write64(int_status, dev->bar2 + RVU_PF_INT);

        /* Enable interrupts */
        otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

        off = mbox->rx_start +
              RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
        msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
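
/*
 * Wait for the AF to answer the 'num_msg' messages that were routed
 * on behalf of VF 'vf', then copy each response from the AF<->PF
 * mailbox into that VF's PF<->VF mailbox. Returns the number of
 * responses found.
 */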
static int
af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)
        uint32_t timeout = 0, sleep = 1;
        struct otx2_mbox *mbox = dev->mbox;
        struct otx2_mbox_dev *mdev = &mbox->dev[0];
        volatile uint64_t int_status;
        struct mbox_hdr *req_hdr;
        struct mbox_msghdr *msg;
        struct mbox_msghdr *rsp;

        /* Disable PF interrupts; we are called from interrupt context */
        otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

        otx2_mbox_msg_send(mbox, 0);

        do {
                if (timeout >= MBOX_RSP_TIMEOUT) {
                        otx2_err("Routed messages %d timeout: %dms",
                                 num_msg, MBOX_RSP_TIMEOUT);

                int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
        } while ((int_status & 0x1) != 0x1);

        /* Clear the interrupt status */
        otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);

        /* Enable interrupts */
        otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

        rte_spinlock_lock(&mdev->mbox_lock);

        req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
        if (req_hdr->num_msgs != num_msg)
                otx2_err("Routed messages: %d received: %d", num_msg,
                         req_hdr->num_msgs);

        /* Get the messages from the mbox */
        offset = mbox->rx_start +
                 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
        for (i = 0; i < req_hdr->num_msgs; i++) {
                msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
                size = mbox->rx_start + msg->next_msgoff - offset;

                /* Reserve a PF/VF mbox message */
                size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
                rsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
                otx2_mbox_rsp_init(msg->id, rsp);

                /* Copy the message from the AF<->PF mbox to the PF<->VF mbox */
                otx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
                                 (uint8_t *)msg + sizeof(struct mbox_msghdr),
                                 size - sizeof(struct mbox_msghdr));

                /* Set the status and the sender pf_func data */
                rsp->rc = msg->rc;
                rsp->pcifunc = msg->pcifunc;

                offset = mbox->rx_start + msg->next_msgoff;
        }
        rte_spinlock_unlock(&mdev->mbox_lock);

        return req_hdr->num_msgs;
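
/*
 * Handle the pending requests of one VF: MBOX_MSG_READY is answered
 * locally and marks the VF active; every other message is forwarded
 * verbatim to the AF, and the AF responses are relayed back via
 * af_pf_wait_msg().
 */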
vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)
        int offset, routed = 0;
        struct otx2_mbox *mbox = &dev->mbox_vfpf;
        struct otx2_mbox_dev *mdev = &mbox->dev[vf];
        struct mbox_hdr *req_hdr;
        struct mbox_msghdr *msg;

        req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
        if (!req_hdr->num_msgs)

        offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

        for (i = 0; i < req_hdr->num_msgs; i++) {
                msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
                size = mbox->rx_start + msg->next_msgoff - offset;

                /* Record which VF sent this message */
                msg->pcifunc = otx2_pfvf_func(dev->pf, vf);

                if (msg->id == MBOX_MSG_READY) {
                        struct ready_msg_rsp *rsp;
                        uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

                        /* Handle the READY message in the PF */
                        dev->active_vfs[vf / max_bits] |=
                                BIT_ULL(vf % max_bits);
                        rsp = (struct ready_msg_rsp *)
                              otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp));
                        otx2_mbox_rsp_init(msg->id, rsp);

                        /* PF/VF function ID */
                        rsp->hdr.pcifunc = msg->pcifunc;
                } else {
                        struct mbox_msghdr *af_req;

                        /* Reserve an AF/PF mbox message */
                        size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
                        af_req = otx2_mbox_alloc_msg(dev->mbox, 0, size);
                        otx2_mbox_req_init(msg->id, af_req);

                        /* Copy the message from the VF<->PF mbox to the
                         * PF<->AF mbox.
                         */
                        otx2_mbox_memcpy((uint8_t *)af_req +
                                         sizeof(struct mbox_msghdr),
                                         (uint8_t *)msg +
                                         sizeof(struct mbox_msghdr),
                                         size - sizeof(struct mbox_msghdr));
                        af_req->pcifunc = msg->pcifunc;
                        routed++;
                }
                offset = mbox->rx_start + msg->next_msgoff;
        }

        if (routed > 0) {
                otx2_base_dbg("pf:%d routed %d messages from vf:%d to AF",
                              dev->pf, routed, vf);
                af_pf_wait_msg(dev, vf, routed);
                otx2_mbox_reset(dev->mbox, 0);
        }

        /* Send mbox responses to the VF */
        if (mdev->num_msgs) {
                otx2_base_dbg("pf:%d reply %d messages to vf:%d",
                              dev->pf, mdev->num_msgs, vf);
                otx2_mbox_msg_send(mbox, vf);
        }
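
/*
 * Drain the UP (notification) mailbox of one VF. Only the
 * acknowledgments for CGX link events and PTP RX info are expected
 * here; anything else is logged as unhandled.
 */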
vf_pf_process_up_msgs(struct otx2_dev *dev, uint16_t vf)
        struct otx2_mbox *mbox = &dev->mbox_vfpf_up;
        struct otx2_mbox_dev *mdev = &mbox->dev[vf];
        struct mbox_hdr *req_hdr;
        struct mbox_msghdr *msg;

        req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
        if (req_hdr->num_msgs == 0)

        offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

        for (i = 0; i < req_hdr->num_msgs; i++) {
                msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

                msg->pcifunc = otx2_pfvf_func(dev->pf, vf);

                switch (msg->id) {
                case MBOX_MSG_CGX_LINK_EVENT:
                        otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
                                      msg->id, otx2_mbox_id2name(msg->id),
                                      msg->pcifunc, otx2_get_pf(msg->pcifunc),
                                      otx2_get_vf(msg->pcifunc));
                        break;
                case MBOX_MSG_CGX_PTP_RX_INFO:
                        otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
                                      msg->id, otx2_mbox_id2name(msg->id),
                                      msg->pcifunc, otx2_get_pf(msg->pcifunc),
                                      otx2_get_vf(msg->pcifunc));
                        break;
                default:
                        otx2_err("Unhandled UP msg 0x%x (%s) func:0x%x",
                                 msg->id, otx2_mbox_id2name(msg->id),
                                 msg->pcifunc);
                }
                offset = mbox->rx_start + msg->next_msgoff;
        }
        otx2_mbox_reset(mbox, vf);
        mdev->msgs_acked = msgs_acked;
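
/*
 * EAL alarm callback scheduled by otx2_vf_pf_mbox_irq(): walk the
 * latched interrupt bits, service each signalling VF and clear its
 * bit.
 */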
static void
otx2_vf_pf_mbox_handle_msg(void *param)
        uint16_t vf, max_vf, max_bits;
        struct otx2_dev *dev = param;

        max_bits = sizeof(dev->intr.bits[0]) * 8; /* bits per bitmap word */
        max_vf = max_bits * MAX_VFPF_DWORD_BITS;

        for (vf = 0; vf < max_vf; vf++) {
                if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
                        otx2_base_dbg("Process vf:%d request (pf:%d, vf:%d)",
                                      vf, dev->pf, dev->vf);
                        vf_pf_process_msgs(dev, vf);
                        vf_pf_process_up_msgs(dev, vf);
                        dev->intr.bits[vf / max_bits] &=
                                ~(BIT_ULL(vf % max_bits));
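
/*
 * VF->PF mailbox IRQ handler. It only saves and clears the per-VF
 * interrupt bits; the mailbox exchange with the AF is deferred to an
 * alarm callback so that it does not run in IRQ context.
 */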
static void
otx2_vf_pf_mbox_irq(void *param)
        struct otx2_dev *dev = param;
        bool alarm_set = false;

        for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
                intr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));

                otx2_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
                              vfpf, intr, dev->pf, dev->vf);

                /* Save and clear the interrupt bits */
                dev->intr.bits[vfpf] |= intr;
                otx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
        }

        if (!dev->timer_set && alarm_set) {
                /* Start a timer to handle the messages */
                rte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,
                                  otx2_vf_pf_mbox_handle_msg, dev);
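
/*
 * Walk the received responses in a configuration mailbox: record our
 * pcifunc from the READY response, report errors, and finally bump
 * msgs_acked so that a synchronous sender can proceed.
 */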
static void
otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)
        struct otx2_mbox_dev *mdev = &mbox->dev[0];
        struct mbox_hdr *req_hdr;
        struct mbox_msghdr *msg;

        req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
        if (req_hdr->num_msgs == 0)
                return;

        offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
        for (i = 0; i < req_hdr->num_msgs; i++) {
                msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

                otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
                              msg->id, otx2_mbox_id2name(msg->id),
                              otx2_get_pf(msg->pcifunc),
                              otx2_get_vf(msg->pcifunc));

                switch (msg->id) {
                        /* Add message IDs that are handled here */
                case MBOX_MSG_READY:
                        /* Get our identity */
                        dev->pf_func = msg->pcifunc;
                        break;
                default:
                        if (msg->rc)
                                otx2_err("Message (%s) response has err=%d",
                                         otx2_mbox_id2name(msg->id), msg->rc);
                }

                offset = mbox->rx_start + msg->next_msgoff;
        }

        otx2_mbox_reset(mbox, 0);
        /* Update acked if someone is waiting for a message */
        mdev->msgs_acked = msgs_acked;

/* Copy a message received from the AF and send it to the active VFs */
static void
pf_vf_mbox_send_up_msg(struct otx2_dev *dev, void *rec_msg)
        uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8; /* bits per word */
        struct otx2_mbox *vf_mbox = &dev->mbox_vfpf_up;
        struct msg_req *msg = rec_msg;
        struct mbox_msghdr *vf_msg;

        size = RTE_ALIGN(otx2_mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);

        /* Send the UP message to all active VFs */
        for (vf = 0; vf < vf_mbox->ndevs; vf++) {
                /* Skip VFs that are not active */
                if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
                        continue;

                otx2_base_dbg("(%s) size: %zx to VF: %d",
                              otx2_mbox_id2name(msg->hdr.id), size, vf);

                /* Reserve a PF/VF mbox message */
                vf_msg = otx2_mbox_alloc_msg(vf_mbox, vf, size);
                if (vf_msg == NULL) {
                        otx2_err("Failed to alloc VF%d UP message", vf);
                        continue;
                }
                otx2_mbox_req_init(msg->hdr.id, vf_msg);

                /* Copy the message from the AF<->PF UP mbox to the
                 * PF<->VF UP mbox.
                 */
                otx2_mbox_memcpy((uint8_t *)vf_msg +
                                 sizeof(struct mbox_msghdr),
                                 (uint8_t *)msg + sizeof(struct mbox_msghdr),
                                 size - sizeof(struct mbox_msghdr));

                vf_msg->rc = msg->hdr.rc;
                /* Set the PF as the sender */
                vf_msg->pcifunc = dev->pf_func;

                otx2_mbox_msg_send(vf_mbox, vf);
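
/*
 * UP-message handler for CGX link events. On a PF (message origin
 * pf:0, i.e. the AF) the event is passed to the driver callback and
 * then broadcast to the VFs; on a VF only the callback is invoked.
 */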
int
otx2_mbox_up_handler_cgx_link_event(struct otx2_dev *dev,
                                    struct cgx_link_info_msg *msg,
                                    struct msg_rsp *rsp)
        struct cgx_link_user_info *linfo = &msg->link_info;

        otx2_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
                      otx2_get_pf(dev->pf_func), otx2_get_vf(dev->pf_func),
                      linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
                      otx2_mbox_id2name(msg->hdr.id),
                      otx2_get_pf(msg->hdr.pcifunc),
                      otx2_get_vf(msg->hdr.pcifunc));

        /* PF gets the link notification from the AF */
        if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
                if (dev->ops && dev->ops->link_status_update)
                        dev->ops->link_status_update(dev, linfo);

                /* Forward the message as received from the AF to the VFs */
                pf_vf_mbox_send_up_msg(dev, msg);
        } else {
                /* VF gets the link notification from the PF */
                if (dev->ops && dev->ops->link_status_update)
                        dev->ops->link_status_update(dev, linfo);
        }

int
otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_dev *dev,
                                     struct cgx_ptp_rx_info_msg *msg,
                                     struct msg_rsp *rsp)
        otx2_nix_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
                     otx2_get_pf(dev->pf_func),
                     otx2_get_vf(dev->pf_func),
                     msg->ptp_en ? "ENABLED" : "DISABLED",
                     msg->hdr.id, otx2_mbox_id2name(msg->hdr.id),
                     otx2_get_pf(msg->hdr.pcifunc),
                     otx2_get_vf(msg->hdr.pcifunc));

        /* PF gets the PTP notification from the AF */
        if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
                if (dev->ops && dev->ops->ptp_info_update)
                        dev->ops->ptp_info_update(dev, msg->ptp_en);

                /* Forward the message as received from the AF to the VFs */
                pf_vf_mbox_send_up_msg(dev, msg);
        } else {
                /* VF gets the PTP notification from the PF */
                if (dev->ops && dev->ops->ptp_info_update)
                        dev->ops->ptp_info_update(dev, msg->ptp_en);
        }
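
/*
 * Dispatch one UP message to its otx2_mbox_up_handler_*() routine.
 * The M() macro below is meant to be expanded once per UP message
 * type (via a MBOX_UP_*_MESSAGES list macro, elided here), producing
 * one switch case per message ID. Roughly, each expansion should
 * look like this hedged sketch:
 *
 *	case _id: {
 *		struct _rsp_type *rsp;
 *		int err;
 *		... allocate rsp in dev->mbox_up and fill rsp->hdr ...
 *		err = otx2_mbox_up_handler_##_fn_name(
 *			dev, (struct _req_type *)req, rsp);
 *		return err;
 *	}
 */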
static int
mbox_process_msgs_up(struct otx2_dev *dev, struct mbox_msghdr *req)
        /* Check if valid; if not, reply with an invalid message */
        if (req->sig != OTX2_MBOX_REQ_SIG)

#define M(_name, _id, _fn_name, _req_type, _rsp_type)          \
        struct _rsp_type *rsp;                                 \
        rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(         \
                &dev->mbox_up, 0,                              \
                sizeof(struct _rsp_type));                     \
        rsp->hdr.sig = OTX2_MBOX_RSP_SIG;                      \
        rsp->hdr.pcifunc = dev->pf_func;                       \
        err = otx2_mbox_up_handler_ ## _fn_name(               \
                dev, (struct _req_type *)req, rsp);            \

        otx2_reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);

static void
otx2_process_msgs_up(struct otx2_dev *dev, struct otx2_mbox *mbox)
        struct otx2_mbox_dev *mdev = &mbox->dev[0];
        struct mbox_hdr *req_hdr;
        struct mbox_msghdr *msg;

        req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
        if (req_hdr->num_msgs == 0)
                return;

        offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
        for (i = 0; i < req_hdr->num_msgs; i++) {
                msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

                otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
                              msg->id, otx2_mbox_id2name(msg->id),
                              otx2_get_pf(msg->pcifunc),
                              otx2_get_vf(msg->pcifunc));
                err = mbox_process_msgs_up(dev, msg);
                if (err)
                        otx2_err("Error %d handling 0x%x (%s)",
                                 err, msg->id, otx2_mbox_id2name(msg->id));
                offset = mbox->rx_start + msg->next_msgoff;
        }
        /* Send mbox responses */
        if (mdev->num_msgs) {
                otx2_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
                otx2_mbox_msg_send(mbox, 0);
        }
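
/*
 * Mailbox IRQ handlers: ack the interrupt, then drain first the
 * configuration responses and then the UP notifications.
 * otx2_pf_vf_mbox_irq() is the VF-side handler for RVU_VF_INT;
 * otx2_af_pf_mbox_irq() is the PF-side handler for RVU_PF_INT.
 */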
static void
otx2_pf_vf_mbox_irq(void *param)
        struct otx2_dev *dev = param;

        intr = otx2_read64(dev->bar2 + RVU_VF_INT);
        if (intr == 0)
                otx2_base_dbg("Proceeding to check mbox UP messages if any");

        otx2_write64(intr, dev->bar2 + RVU_VF_INT);
        otx2_base_dbg("Irq 0x%" PRIx64 " (pf:%d,vf:%d)", intr, dev->pf, dev->vf);

        /* First process all configuration messages */
        otx2_process_msgs(dev, dev->mbox);

        /* Process UP messages */
        otx2_process_msgs_up(dev, &dev->mbox_up);

static void
otx2_af_pf_mbox_irq(void *param)
        struct otx2_dev *dev = param;

        intr = otx2_read64(dev->bar2 + RVU_PF_INT);
        if (intr == 0)
                otx2_base_dbg("Proceeding to check mbox UP messages if any");

        otx2_write64(intr, dev->bar2 + RVU_PF_INT);
        otx2_base_dbg("Irq 0x%" PRIx64 " (pf:%d,vf:%d)", intr, dev->pf, dev->vf);

        /* First process all configuration messages */
        otx2_process_msgs(dev, dev->mbox);

        /* Process UP messages */
        otx2_process_msgs_up(dev, &dev->mbox_up);

static int
mbox_register_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        /* Clear and disable the HW mailbox interrupts */
        for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
                otx2_write64(~0ull, dev->bar2 +
                             RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

        otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

        /* MBOX interrupt for VF(0..63) <-> PF */
        rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
                               RVU_PF_INT_VEC_VFPF_MBOX0);
        if (rc)
                otx2_err("Failed to register PF(VF0-63) mbox irq");

        /* MBOX interrupt for VF(64..127) <-> PF */
        rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
                               RVU_PF_INT_VEC_VFPF_MBOX1);
        if (rc)
                otx2_err("Failed to register PF(VF64-127) mbox irq");

        /* MBOX interrupt AF <-> PF */
        rc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,
                               dev, RVU_PF_INT_VEC_AFPF_MBOX);
        if (rc)
                otx2_err("Failed to register AF<->PF mbox irq");

        /* Enable the HW mailbox interrupts */
        for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
                otx2_write64(~0ull, dev->bar2 +
                             RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));

        otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
        otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

static int
mbox_register_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        /* Clear and disable the HW mailbox interrupt */
        otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

        /* MBOX interrupt PF <-> VF */
        rc = otx2_register_irq(intr_handle, otx2_pf_vf_mbox_irq,
                               dev, RVU_VF_INT_VEC_MBOX);
        if (rc)
                otx2_err("Failed to register PF<->VF mbox irq");

        /* Enable the HW mailbox interrupt */
        otx2_write64(~0ull, dev->bar2 + RVU_VF_INT);
        otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);

static int
mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
        if (otx2_dev_is_vf(dev))
                return mbox_register_vf_irq(pci_dev, dev);
        else
                return mbox_register_pf_irq(pci_dev, dev);

static void
mbox_unregister_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        /* Clear and disable the HW mailbox interrupts */
        for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
                otx2_write64(~0ull, dev->bar2 +
                             RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

        otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

        /* Cancel any pending message-handling alarm */
        rte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev);

        /* Unregister the interrupt handler for each vector */
        /* MBOX interrupt for VF(0..63) <-> PF */
        otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
                            RVU_PF_INT_VEC_VFPF_MBOX0);

        /* MBOX interrupt for VF(64..127) <-> PF */
        otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
                            RVU_PF_INT_VEC_VFPF_MBOX1);

        /* MBOX interrupt AF <-> PF */
        otx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,
                            RVU_PF_INT_VEC_AFPF_MBOX);

static void
mbox_unregister_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        /* Clear and disable the HW mailbox interrupt */
        otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

        /* Unregister the interrupt handler */
        otx2_unregister_irq(intr_handle, otx2_pf_vf_mbox_irq, dev,
                            RVU_VF_INT_VEC_MBOX);

static void
mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
        if (otx2_dev_is_vf(dev))
                mbox_unregister_vf_irq(pci_dev, dev);
        else
                mbox_unregister_pf_irq(pci_dev, dev);
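
/*
 * Inform the AF that a VF went through FLR; hdr.pcifunc is
 * overwritten so the AF attributes the reset to the VF rather than
 * to the PF sending the message.
 */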
static int
vf_flr_send_msg(struct otx2_dev *dev, uint16_t vf)
        struct otx2_mbox *mbox = dev->mbox;

        req = otx2_mbox_alloc_msg_vf_flr(mbox);
        /* Overwrite pcifunc to indicate that the VF itself was reset */
        req->hdr.pcifunc = otx2_pfvf_func(dev->pf, vf);

        /* Synchronous message exchange in interrupt context */
        rc = pf_af_sync_msg(dev, NULL);
        if (rc)
                otx2_err("Failed to send VF FLR mbox msg, rc=%d", rc);
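
/*
 * VF FLR IRQ handler. For every VF with a pending FLR: clear and
 * mask its interrupt, notify the AF, signal FLR completion through
 * VFTRPEND and re-enable the interrupt.
 */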
static void
otx2_pf_vf_flr_irq(void *param)
        struct otx2_dev *dev = (struct otx2_dev *)param;
        uint16_t max_vf = RVU_MAX_VF, vf;
        uintptr_t bar2 = dev->bar2;

        max_vf = (dev->maxvf > 0) ? dev->maxvf : RVU_MAX_VF;

        otx2_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);

        for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
                intr = otx2_read64(bar2 + RVU_PF_VFFLR_INTX(i));

                for (vf = 0; vf < max_vf; vf++) {
                        if (!(intr & (1ULL << vf)))
                                continue;

                        otx2_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d",
                                      i, intr, (64 * i + vf));
                        /* Clear the interrupt */
                        otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
                        /* Disable the interrupt */
                        otx2_write64(BIT_ULL(vf),
                                     bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
                        /* Inform the AF about the VF reset */
                        vf_flr_send_msg(dev, vf);

                        /* Signal FLR finish */
                        otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
                        /* Enable the interrupt again */
                        otx2_write64(BIT_ULL(vf),
                                     bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
                }
        }

static void
vf_flr_unregister_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        otx2_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);

        /* Disable the HW FLR interrupts */
        for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
                otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

        otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
                            RVU_PF_INT_VEC_VFFLR0);

        otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
                            RVU_PF_INT_VEC_VFFLR1);

static int
vf_flr_register_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
        struct rte_intr_handle *handle = &pci_dev->intr_handle;

        otx2_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);

        rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
                               RVU_PF_INT_VEC_VFFLR0);
        if (rc)
                otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);

        rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
                               RVU_PF_INT_VEC_VFFLR1);
        if (rc)
                otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);

        /* Enable the HW interrupts */
        for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
                otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
                otx2_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
                otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
        }

/*
 * Get the number of active VFs for the given PF device.
 */
int
otx2_dev_active_vfs(void *otx2_dev)
        struct otx2_dev *dev = otx2_dev;
        int i, count = 0;

        /* Use the 64-bit popcount: active_vfs[] words are uint64_t */
        for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
                count += __builtin_popcountll(dev->active_vfs[i]);

        return count;

static void
otx2_update_vf_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
        switch (pci_dev->id.device_id) {
        case PCI_DEVID_OCTEONTX2_RVU_PF:
                break;
        case PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF:
        case PCI_DEVID_OCTEONTX2_RVU_NPA_VF:
        case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
        case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
        case PCI_DEVID_OCTEONTX2_RVU_VF:
        case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
                dev->hwcap |= OTX2_HWCAP_F_VF;
                break;
        }

/*
 * Initialize the otx2 device
 */
int
otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev)
        int up_direction = MBOX_DIR_PFAF_UP;
        int rc, direction = MBOX_DIR_PFAF;
        uint64_t intr_offset = RVU_PF_INT;
        struct otx2_dev *dev = otx2_dev;
        uintptr_t bar2, bar4;
        uint64_t bar4_addr;
        void *hwbase;

        bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
        bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
        if (bar2 == 0 || bar4 == 0) {
                otx2_err("Failed to get PCI bars");

        dev->node = pci_dev->device.numa_node;
        dev->maxvf = pci_dev->max_vfs;

        otx2_update_vf_hwcap(pci_dev, dev);

        if (otx2_dev_is_vf(dev)) {
                direction = MBOX_DIR_VFPF;
                up_direction = MBOX_DIR_VFPF_UP;
                intr_offset = RVU_VF_INT;
        }

        /* Initialize the local mbox */
        rc = otx2_mbox_init(&dev->mbox_local, bar4, bar2, direction, 1,
                            intr_offset);
        dev->mbox = &dev->mbox_local;

        rc = otx2_mbox_init(&dev->mbox_up, bar4, bar2, up_direction, 1,
                            intr_offset);

        /* Register mbox interrupts */
        rc = mbox_register_irq(pci_dev, dev);

        /* Check the readiness of PF/VF */
        rc = otx2_send_ready_msg(dev->mbox, &dev->pf_func);
        if (rc)
                goto mbox_unregister;

        dev->pf = otx2_get_pf(dev->pf_func);
        dev->vf = otx2_get_vf(dev->pf_func);
        memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));

        /* Set up the VF mailboxes if this PF has VF devices */
        if (pci_dev->max_vfs > 0) {
                /* Remap the mbox area for all VFs */
                bar4_addr = otx2_read64(bar2 + RVU_PF_VF_BAR4_ADDR);
                if (bar4_addr == 0) {

                hwbase = mbox_mem_map(bar4_addr, MBOX_SIZE * pci_dev->max_vfs);
                if (hwbase == MAP_FAILED) {

                /* Init the mbox object */
                rc = otx2_mbox_init(&dev->mbox_vfpf, (uintptr_t)hwbase,
                                    bar2, MBOX_DIR_PFVF, pci_dev->max_vfs,
                                    intr_offset);

                /* PF -> VF UP messages */
                rc = otx2_mbox_init(&dev->mbox_vfpf_up, (uintptr_t)hwbase,
                                    bar2, MBOX_DIR_PFVF_UP, pci_dev->max_vfs,
                                    intr_offset);

        /* Register the VF-FLR irq handlers */
        if (otx2_dev_is_pf(dev)) {
                rc = vf_flr_register_irqs(pci_dev, dev);
        }

        dev->mbox_active = 1;

        /* Failure path: undo partial initialization */
        mbox_mem_unmap(hwbase, MBOX_SIZE * pci_dev->max_vfs);

        mbox_unregister_irq(pci_dev, dev);

        otx2_mbox_fini(dev->mbox);
        otx2_mbox_fini(&dev->mbox_up);

/*
 * Finalize the otx2 device
 */
void
otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev)
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct otx2_dev *dev = otx2_dev;
        struct otx2_idev_cfg *idev;
        struct otx2_mbox *mbox;

        /* Clear references to this PCI dev */
        idev = otx2_intra_dev_get_cfg();
        if (idev->npa_lf && idev->npa_lf->pci_dev == pci_dev)
                idev->npa_lf = NULL;

        mbox_unregister_irq(pci_dev, dev);

        if (otx2_dev_is_pf(dev))
                vf_flr_unregister_irqs(pci_dev, dev);

        /* Release the PF - VF mboxes */
        mbox = &dev->mbox_vfpf;
        if (mbox->hwbase && mbox->dev)
                mbox_mem_unmap((void *)mbox->hwbase,
                               MBOX_SIZE * pci_dev->max_vfs);
        otx2_mbox_fini(mbox);
        mbox = &dev->mbox_vfpf_up;
        otx2_mbox_fini(mbox);

        /* Release the PF - AF mboxes */
        mbox = dev->mbox;
        otx2_mbox_fini(mbox);
        mbox = &dev->mbox_up;
        otx2_mbox_fini(mbox);
        dev->mbox_active = 0;

        /* Disable MSIX vectors */
        otx2_disable_irqs(intr_handle);