1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
10 #include <rte_alarm.h>
11 #include <rte_common.h>
13 #include <rte_memcpy.h>
14 #include <rte_eal_paging.h>
17 #include "otx2_mbox.h"
19 #define RVU_MAX_VF 64 /* RVU_PF_VFPF_MBOX_INT(0..1) */
20 #define RVU_MAX_INT_RETRY 3
22 /* PF/VF message handling timer */
23 #define VF_PF_MBOX_TIMER_MS (20 * 1000)
/*
 * Map a physical-address window (e.g. the per-VF mailbox BAR region) into
 * this process.  Goes through /dev/mem + rte_mem_map(); returns the mapped
 * VA on success and presumably MAP_FAILED on error — the return statements
 * are not visible in this excerpt, so confirm against the full source.
 */
26 mbox_mem_map(off_t off, size_t size)
28 void *va = MAP_FAILED;
/* Physical memory is reached through the /dev/mem character device */
34 mem_fd = open("/dev/mem", O_RDWR);
38 va = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
39 RTE_MAP_SHARED, mem_fd, off);
43 otx2_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd",
44 size, mem_fd, (intmax_t)off);
/* Counterpart of mbox_mem_map(): release a mapping obtained above. */
50 mbox_mem_unmap(void *va, size_t size)
53 rte_mem_unmap(va, size);
/*
 * Send the pending PF->AF mailbox message and busy-poll RVU_PF_INT for the
 * AF response.  Runs in interrupt/alarm context, hence the polling instead
 * of waiting on the normal mbox IRQ path.  On success the first response
 * header is located at rx_start + aligned mbox_hdr; the caller receives it
 * through *rsp (assignment not visible in this excerpt — confirm).
 */
57 pf_af_sync_msg(struct otx2_dev *dev, struct mbox_msghdr **rsp)
59 uint32_t timeout = 0, sleep = 1; struct otx2_mbox *mbox = dev->mbox;
60 struct otx2_mbox_dev *mdev = &mbox->dev[0];
61 volatile uint64_t int_status;
62 struct mbox_msghdr *msghdr;
66 /* We need to disable PF interrupts. We are in timer interrupt */
67 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
70 otx2_mbox_msg_send(mbox, 0);
/* Poll until AF raises bit 0 of RVU_PF_INT or MBOX_RSP_TIMEOUT elapses */
75 if (timeout >= MBOX_RSP_TIMEOUT) {
76 otx2_err("Message timeout: %dms", MBOX_RSP_TIMEOUT);
80 int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
81 } while ((int_status & 0x1) != 0x1);
/* W1C: acknowledge exactly the interrupt bits we observed */
84 otx2_write64(int_status, dev->bar2 + RVU_PF_INT);
86 /* Enable interrupts */
87 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
/* First message header follows the aligned mailbox header at rx_start */
91 off = mbox->rx_start +
92 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
93 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
/*
 * Forward responses for messages the PF routed to the AF on behalf of VF
 * 'vf'.  Sends the queued PF->AF request, busy-polls RVU_PF_INT (interrupt
 * context, same pattern as pf_af_sync_msg()), then copies each AF response
 * from the AF<->PF mailbox into the PF<->VF mailbox for that VF.
 * Returns the number of responses processed (req_hdr->num_msgs).
 */
103 af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)
105 uint32_t timeout = 0, sleep = 1; struct otx2_mbox *mbox = dev->mbox;
106 struct otx2_mbox_dev *mdev = &mbox->dev[0];
107 volatile uint64_t int_status;
108 struct mbox_hdr *req_hdr;
109 struct mbox_msghdr *msg;
110 struct mbox_msghdr *rsp;
115 /* We need to disable PF interrupts. We are in timer interrupt */
116 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
119 otx2_mbox_msg_send(mbox, 0);
/* Poll for AF completion (bit 0 of RVU_PF_INT) with a bounded timeout */
124 if (timeout >= MBOX_RSP_TIMEOUT) {
125 otx2_err("Routed messages %d timeout: %dms",
126 num_msg, MBOX_RSP_TIMEOUT);
129 int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
130 } while ((int_status & 0x1) != 0x1);
/* Clear interrupt status (writes all bits, unlike pf_af_sync_msg) */
133 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
135 /* Enable interrupts */
136 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
138 rte_spinlock_lock(&mdev->mbox_lock);
140 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
/* AF should answer exactly the number of requests we routed */
141 if (req_hdr->num_msgs != num_msg)
142 otx2_err("Routed messages: %d received: %d", num_msg,
145 /* Get messages from mbox */
146 offset = mbox->rx_start +
147 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
148 for (i = 0; i < req_hdr->num_msgs; i++) {
149 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
150 size = mbox->rx_start + msg->next_msgoff - offset;
152 /* Reserve PF/VF mbox message */
153 size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
154 rsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
155 otx2_mbox_rsp_init(msg->id, rsp);
157 /* Copy message from AF<->PF mbox to PF<->VF mbox */
158 otx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
159 (uint8_t *)msg + sizeof(struct mbox_msghdr),
160 size - sizeof(struct mbox_msghdr));
162 /* Set status and sender pf_func data */
164 rsp->pcifunc = msg->pcifunc;
166 /* Whenever a PF comes up, AF sends the link status to it but
167 * when VF comes up no such event is sent to respective VF.
168 * Using MBOX_MSG_NIX_LF_START_RX response from AF for the
169 * purpose and send the link status of PF to VF.
171 if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
172 /* Send link status to VF */
173 struct cgx_link_user_info linfo;
174 struct mbox_msghdr *vf_msg;
176 /* Get the link status */
177 if (dev->ops && dev->ops->link_status_get)
178 dev->ops->link_status_get(dev, &linfo);
180 /* Prepare the message to be sent */
181 vf_msg = otx2_mbox_alloc_msg(&dev->mbox_vfpf_up, vf,
183 otx2_mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
184 memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
185 &linfo, sizeof(struct cgx_link_user_info));
187 vf_msg->rc = msg->rc;
188 vf_msg->pcifunc = msg->pcifunc;
190 otx2_mbox_msg_send(&dev->mbox_vfpf_up, vf);
192 offset = mbox->rx_start + msg->next_msgoff;
194 rte_spinlock_unlock(&mdev->mbox_lock);
196 return req_hdr->num_msgs;
200 vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)
202 int offset, routed = 0; struct otx2_mbox *mbox = &dev->mbox_vfpf;
203 struct otx2_mbox_dev *mdev = &mbox->dev[vf];
204 struct mbox_hdr *req_hdr;
205 struct mbox_msghdr *msg;
209 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
210 if (!req_hdr->num_msgs)
213 offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
215 for (i = 0; i < req_hdr->num_msgs; i++) {
217 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
218 size = mbox->rx_start + msg->next_msgoff - offset;
221 msg->pcifunc = otx2_pfvf_func(dev->pf, vf);
223 if (msg->id == MBOX_MSG_READY) {
224 struct ready_msg_rsp *rsp;
225 uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
227 /* Handle READY message in PF */
228 dev->active_vfs[vf / max_bits] |=
229 BIT_ULL(vf % max_bits);
230 rsp = (struct ready_msg_rsp *)
231 otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp));
232 otx2_mbox_rsp_init(msg->id, rsp);
234 /* PF/VF function ID */
235 rsp->hdr.pcifunc = msg->pcifunc;
238 struct mbox_msghdr *af_req;
239 /* Reserve AF/PF mbox message */
240 size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
241 af_req = otx2_mbox_alloc_msg(dev->mbox, 0, size);
242 otx2_mbox_req_init(msg->id, af_req);
244 /* Copy message from VF<->PF mbox to PF<->AF mbox */
245 otx2_mbox_memcpy((uint8_t *)af_req +
246 sizeof(struct mbox_msghdr),
247 (uint8_t *)msg + sizeof(struct mbox_msghdr),
248 size - sizeof(struct mbox_msghdr));
249 af_req->pcifunc = msg->pcifunc;
252 offset = mbox->rx_start + msg->next_msgoff;
256 otx2_base_dbg("pf:%d routed %d messages from vf:%d to AF",
257 dev->pf, routed, vf);
258 af_pf_wait_msg(dev, vf, routed);
259 otx2_mbox_reset(dev->mbox, 0);
262 /* Send mbox responses to VF */
263 if (mdev->num_msgs) {
264 otx2_base_dbg("pf:%d reply %d messages to vf:%d",
265 dev->pf, mdev->num_msgs, vf);
266 otx2_mbox_msg_send(mbox, vf);
/*
 * Drain the UP (notification) mailbox of a single VF.  UP messages are
 * acknowledgements/notifications, not requests: known ids are only logged,
 * unknown ids produce an error.  The mailbox is reset and msgs_acked is
 * updated so any waiter sees the messages as consumed.
 */
275 vf_pf_process_up_msgs(struct otx2_dev *dev, uint16_t vf)
275 struct otx2_mbox *mbox = &dev->mbox_vfpf_up;
276 struct otx2_mbox_dev *mdev = &mbox->dev[vf];
277 struct mbox_hdr *req_hdr;
278 struct mbox_msghdr *msg;
283 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
284 if (req_hdr->num_msgs == 0)
287 offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
289 for (i = 0; i < req_hdr->num_msgs; i++) {
290 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
/* Stamp sender identity before inspecting the message id */
294 msg->pcifunc = otx2_pfvf_func(dev->pf, vf);
297 case MBOX_MSG_CGX_LINK_EVENT:
298 otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
299 msg->id, otx2_mbox_id2name(msg->id),
300 msg->pcifunc, otx2_get_pf(msg->pcifunc),
301 otx2_get_vf(msg->pcifunc));
303 case MBOX_MSG_CGX_PTP_RX_INFO:
304 otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
305 msg->id, otx2_mbox_id2name(msg->id),
306 msg->pcifunc, otx2_get_pf(msg->pcifunc),
307 otx2_get_vf(msg->pcifunc));
310 otx2_err("Not handled UP msg 0x%x (%s) func:0x%x",
311 msg->id, otx2_mbox_id2name(msg->id),
314 offset = mbox->rx_start + msg->next_msgoff;
316 otx2_mbox_reset(mbox, vf);
317 mdev->msgs_acked = msgs_acked;
/*
 * Alarm callback: walk the interrupt bitmap saved by otx2_vf_pf_mbox_irq()
 * and process request + UP mailboxes of every VF whose bit is set, then
 * clear that bit.
 *
 * NOTE(review): max_bits is computed as sizeof(bits[0]) * sizeof(uint64_t)
 * — i.e. 8 * 8 = 64.  That is numerically right only because both factors
 * happen to be 8; bits-per-byte should be '* 8' (CHAR_BIT) as done in
 * vf_pf_process_msgs().  Worth confirming/cleaning upstream.
 */
324 otx2_vf_pf_mbox_handle_msg(void *param)
326 uint16_t vf, max_vf, max_bits;
327 struct otx2_dev *dev = param;
329 max_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);
330 max_vf = max_bits * MAX_VFPF_DWORD_BITS;
332 for (vf = 0; vf < max_vf; vf++) {
333 if (dev->intr.bits[vf/max_bits] & BIT_ULL(vf%max_bits)) {
334 otx2_base_dbg("Process vf:%d request (pf:%d, vf:%d)",
335 vf, dev->pf, dev->vf);
336 vf_pf_process_msgs(dev, vf);
338 vf_pf_process_up_msgs(dev, vf);
/* Done with this VF: drop its pending-interrupt bit */
339 dev->intr.bits[vf/max_bits] &= ~(BIT_ULL(vf%max_bits));
/*
 * VF->PF mailbox IRQ handler.  Reads and W1C-clears each VFPF interrupt
 * word, accumulates the pending bits into dev->intr.bits[], and defers the
 * actual message processing to otx2_vf_pf_mbox_handle_msg() via an EAL
 * alarm (mbox processing is too heavy for hard-IRQ context).
 */
346 otx2_vf_pf_mbox_irq(void *param)
348 struct otx2_dev *dev = param;
349 bool alarm_set = false;
353 for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
354 intr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
358 otx2_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
359 vfpf, intr, dev->pf, dev->vf);
361 /* Save and clear intr bits */
362 dev->intr.bits[vfpf] |= intr;
363 otx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
/* Arm the deferred handler only once (timer_set guards re-arming) */
367 if (!dev->timer_set && alarm_set) {
369 /* Start timer to handle messages */
370 rte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,
371 otx2_vf_pf_mbox_handle_msg, dev);
/*
 * Process response messages on the given (down-direction) mailbox.
 * MBOX_MSG_READY carries our own pcifunc identity; errors in msg->rc are
 * logged.  Finishes by resetting the mailbox and publishing msgs_acked so
 * synchronous senders waiting on this mailbox can proceed.
 */
376 otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)
378 struct otx2_mbox_dev *mdev = &mbox->dev[0];
379 struct mbox_hdr *req_hdr;
380 struct mbox_msghdr *msg;
385 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
386 if (req_hdr->num_msgs == 0)
389 offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
390 for (i = 0; i < req_hdr->num_msgs; i++) {
391 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
394 otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
395 msg->id, otx2_mbox_id2name(msg->id),
396 otx2_get_pf(msg->pcifunc),
397 otx2_get_vf(msg->pcifunc));
400 /* Add message id's that are handled here */
402 /* Get our identity */
403 dev->pf_func = msg->pcifunc;
408 otx2_err("Message (%s) response has err=%d",
409 otx2_mbox_id2name(msg->id), msg->rc);
412 offset = mbox->rx_start + msg->next_msgoff;
415 otx2_mbox_reset(mbox, 0);
416 /* Update acked if someone is waiting a message */
417 mdev->msgs_acked = msgs_acked;
421 /* Copies the message received from AF and sends it to VF */
/*
 * Broadcast an AF UP message (link/PTP events etc.) to every active VF via
 * the PF->VF UP mailbox, rewriting pcifunc so the PF appears as sender.
 *
 * NOTE(review): the active-VF test uses BIT_ULL(vf) rather than
 * BIT_ULL(vf % max_bits); for vf >= 64 the shift is out of range.  Other
 * sites (e.g. vf_pf_process_msgs) apply the modulo — confirm upstream.
 */
423 pf_vf_mbox_send_up_msg(struct otx2_dev *dev, void *rec_msg)
425 uint16_t max_bits = sizeof(dev->active_vfs[0]) * sizeof(uint64_t);
426 struct otx2_mbox *vf_mbox = &dev->mbox_vfpf_up;
427 struct msg_req *msg = rec_msg;
428 struct mbox_msghdr *vf_msg;
432 size = RTE_ALIGN(otx2_mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
433 /* Send UP message to all VF's */
434 for (vf = 0; vf < vf_mbox->ndevs; vf++) {
/* Skip VFs that never sent READY (not marked active) */
436 if (!(dev->active_vfs[vf / max_bits] & (BIT_ULL(vf))))
439 otx2_base_dbg("(%s) size: %zx to VF: %d",
440 otx2_mbox_id2name(msg->hdr.id), size, vf);
442 /* Reserve PF/VF mbox message */
443 vf_msg = otx2_mbox_alloc_msg(vf_mbox, vf, size);
445 otx2_err("Failed to alloc VF%d UP message", vf);
448 otx2_mbox_req_init(msg->hdr.id, vf_msg);
451 * Copy message from AF<->PF UP mbox
454 otx2_mbox_memcpy((uint8_t *)vf_msg +
455 sizeof(struct mbox_msghdr), (uint8_t *)msg
456 + sizeof(struct mbox_msghdr), size -
457 sizeof(struct mbox_msghdr));
459 vf_msg->rc = msg->hdr.rc;
460 /* Set PF to be a sender */
461 vf_msg->pcifunc = dev->pf_func;
464 otx2_mbox_msg_send(vf_mbox, vf);
/*
 * UP-message handler for MBOX_MSG_CGX_LINK_EVENT.  A PF (pcifunc pf == 0
 * means the event came from AF) updates its own link state and forwards
 * the event to its VFs; a VF only updates its link state.
 */
469 otx2_mbox_up_handler_cgx_link_event(struct otx2_dev *dev,
470 struct cgx_link_info_msg *msg,
473 struct cgx_link_user_info *linfo = &msg->link_info;
475 otx2_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
476 otx2_get_pf(dev->pf_func), otx2_get_vf(dev->pf_func),
477 linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
478 otx2_mbox_id2name(msg->hdr.id),
479 otx2_get_pf(msg->hdr.pcifunc),
480 otx2_get_vf(msg->hdr.pcifunc));
482 /* PF gets link notification from AF */
483 if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
484 if (dev->ops && dev->ops->link_status_update)
485 dev->ops->link_status_update(dev, linfo);
487 /* Forward the same message as received from AF to VF */
488 pf_vf_mbox_send_up_msg(dev, msg);
490 /* VF gets link up notification */
491 if (dev->ops && dev->ops->link_status_update)
492 dev->ops->link_status_update(dev, linfo);
/*
 * UP-message handler for MBOX_MSG_CGX_PTP_RX_INFO — mirrors the link-event
 * handler above: PF applies the PTP enable/disable state and forwards to
 * its VFs; VF just applies it.
 */
500 otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_dev *dev,
501 struct cgx_ptp_rx_info_msg *msg,
504 otx2_nix_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
505 otx2_get_pf(dev->pf_func),
506 otx2_get_vf(dev->pf_func),
507 msg->ptp_en ? "ENABLED" : "DISABLED",
508 msg->hdr.id, otx2_mbox_id2name(msg->hdr.id),
509 otx2_get_pf(msg->hdr.pcifunc),
510 otx2_get_vf(msg->hdr.pcifunc));
512 /* PF gets PTP notification from AF */
513 if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
514 if (dev->ops && dev->ops->ptp_info_update)
515 dev->ops->ptp_info_update(dev, msg->ptp_en);
517 /* Forward the same message as received from AF to VF */
518 pf_vf_mbox_send_up_msg(dev, msg);
520 /* VF gets PTP notification */
521 if (dev->ops && dev->ops->ptp_info_update)
522 dev->ops->ptp_info_update(dev, msg->ptp_en);
/*
 * Dispatch a single UP message: validate its signature, then expand the
 * MBOX UP message table via the M() macro so each known id allocates its
 * typed response and calls the matching otx2_mbox_up_handler_<name>().
 * Unknown/invalid requests get otx2_reply_invalid_msg().
 * (Macro continuation lines below must stay contiguous — do not insert
 * anything between them.)
 */
530 mbox_process_msgs_up(struct otx2_dev *dev, struct mbox_msghdr *req)
532 /* Check if valid, if not reply with an invalid msg */
533 if (req->sig != OTX2_MBOX_REQ_SIG)
537 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
539 struct _rsp_type *rsp; \
542 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
544 sizeof(struct _rsp_type)); \
549 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
550 rsp->hdr.pcifunc = dev->pf_func; \
553 err = otx2_mbox_up_handler_ ## _fn_name( \
554 dev, (struct _req_type *)req, rsp); \
/* Fall-through: no table entry matched this message id */
561 otx2_reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
/*
 * Walk all pending messages on the UP mailbox, hand each one to
 * mbox_process_msgs_up(), and flush any generated responses back to the
 * sender (device index 0 = AF/PF peer).
 */
568 otx2_process_msgs_up(struct otx2_dev *dev, struct otx2_mbox *mbox)
570 struct otx2_mbox_dev *mdev = &mbox->dev[0];
571 struct mbox_hdr *req_hdr;
572 struct mbox_msghdr *msg;
575 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
576 if (req_hdr->num_msgs == 0)
579 offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
580 for (i = 0; i < req_hdr->num_msgs; i++) {
581 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
583 otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
584 msg->id, otx2_mbox_id2name(msg->id),
585 otx2_get_pf(msg->pcifunc),
586 otx2_get_vf(msg->pcifunc));
587 err = mbox_process_msgs_up(dev, msg);
589 otx2_err("Error %d handling 0x%x (%s)",
590 err, msg->id, otx2_mbox_id2name(msg->id));
591 offset = mbox->rx_start + msg->next_msgoff;
593 /* Send mbox responses */
594 if (mdev->num_msgs) {
595 otx2_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
596 otx2_mbox_msg_send(mbox, 0);
/*
 * VF-side mailbox IRQ handler (RVU_VF_INT): acknowledge the interrupt,
 * then process configuration responses first and UP notifications second.
 */
601 otx2_pf_vf_mbox_irq(void *param)
603 struct otx2_dev *dev = param;
606 intr = otx2_read64(dev->bar2 + RVU_VF_INT);
608 otx2_base_dbg("Proceeding to check mbox UP messages if any");
/* W1C: clear exactly the bits we observed */
610 otx2_write64(intr, dev->bar2 + RVU_VF_INT);
611 otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
613 /* First process all configuration messages */
614 otx2_process_msgs(dev, dev->mbox);
616 /* Process Uplink messages */
617 otx2_process_msgs_up(dev, &dev->mbox_up);
/*
 * PF-side AF mailbox IRQ handler (RVU_PF_INT): same flow as the VF-side
 * handler above, but against the PF interrupt register.
 */
621 otx2_af_pf_mbox_irq(void *param)
623 struct otx2_dev *dev = param;
626 intr = otx2_read64(dev->bar2 + RVU_PF_INT);
628 otx2_base_dbg("Proceeding to check mbox UP messages if any");
/* W1C: clear exactly the bits we observed */
630 otx2_write64(intr, dev->bar2 + RVU_PF_INT);
631 otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
633 /* First process all configuration messages */
634 otx2_process_msgs(dev, dev->mbox);
636 /* Process Uplink messages */
637 otx2_process_msgs_up(dev, &dev->mbox_up);
/*
 * Register all PF mailbox interrupt vectors: two VFPF vectors (VF0-63 and
 * VF64-127) plus the AF<->PF vector.  HW interrupts are masked (W1C)
 * before registration and re-enabled (W1S) afterwards; pending status is
 * also cleared before enabling.
 */
641 mbox_register_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
643 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
/* Mask VFPF mailbox interrupts while handlers are being installed */
647 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
648 otx2_write64(~0ull, dev->bar2 +
649 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
651 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
655 /* MBOX interrupt for VF(0...63) <-> PF */
656 rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
657 RVU_PF_INT_VEC_VFPF_MBOX0);
660 otx2_err("Fail to register PF(VF0-63) mbox irq");
663 /* MBOX interrupt for VF(64...128) <-> PF */
664 rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
665 RVU_PF_INT_VEC_VFPF_MBOX1);
668 otx2_err("Fail to register PF(VF64-128) mbox irq");
671 /* MBOX interrupt AF <-> PF */
672 rc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,
673 dev, RVU_PF_INT_VEC_AFPF_MBOX);
675 otx2_err("Fail to register AF<->PF mbox irq");
/* Unmask everything now that the handlers are in place */
680 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
681 otx2_write64(~0ull, dev->bar2 +
682 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));
/* Clear any stale pending status, then enable the AF<->PF interrupt */
684 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
685 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
/*
 * Register the single PF<->VF mailbox vector on a VF device: mask, install
 * handler, clear pending status, unmask.
 */
691 mbox_register_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
693 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
697 otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
699 /* MBOX interrupt PF <-> VF */
700 rc = otx2_register_irq(intr_handle, otx2_pf_vf_mbox_irq,
701 dev, RVU_VF_INT_VEC_MBOX);
703 otx2_err("Fail to register PF<->VF mbox irq");
/* Clear stale status, then enable */
708 otx2_write64(~0ull, dev->bar2 + RVU_VF_INT);
709 otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);
/* Dispatch mailbox IRQ registration to the VF or PF variant. */
715 mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
717 if (otx2_dev_is_vf(dev))
718 return mbox_register_vf_irq(pci_dev, dev);
720 return mbox_register_pf_irq(pci_dev, dev);
/*
 * Tear down PF mailbox interrupts: mask HW interrupts, cancel the deferred
 * VF-message alarm, then unregister all three vectors registered by
 * mbox_register_pf_irq().
 */
724 mbox_unregister_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
726 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
730 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
731 otx2_write64(~0ull, dev->bar2 +
732 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
734 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
/* Make sure the deferred handler armed by the IRQ can no longer fire */
738 rte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev);
740 /* Unregister the interrupt handler for each vectors */
741 /* MBOX interrupt for VF(0...63) <-> PF */
742 otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
743 RVU_PF_INT_VEC_VFPF_MBOX0);
745 /* MBOX interrupt for VF(64...128) <-> PF */
746 otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
747 RVU_PF_INT_VEC_VFPF_MBOX1);
749 /* MBOX interrupt AF <-> PF */
750 otx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,
751 RVU_PF_INT_VEC_AFPF_MBOX);
/* Tear down the VF mailbox interrupt: mask, then unregister the vector. */
756 mbox_unregister_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
758 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
761 otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
763 /* Unregister the interrupt handler */
764 otx2_unregister_irq(intr_handle, otx2_pf_vf_mbox_irq, dev,
765 RVU_VF_INT_VEC_MBOX);
/* Dispatch mailbox IRQ teardown to the VF or PF variant. */
769 mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
771 if (otx2_dev_is_vf(dev))
772 mbox_unregister_vf_irq(pci_dev, dev);
774 mbox_unregister_pf_irq(pci_dev, dev);
/*
 * Notify the AF that VF 'vf' underwent FLR.  Uses the synchronous
 * (polling) send path because this is called from interrupt context.
 */
778 vf_flr_send_msg(struct otx2_dev *dev, uint16_t vf)
780 struct otx2_mbox *mbox = dev->mbox;
784 req = otx2_mbox_alloc_msg_vf_flr(mbox);
785 /* Overwrite pcifunc to indicate VF */
786 req->hdr.pcifunc = otx2_pfvf_func(dev->pf, vf);
788 /* Sync message in interrupt context */
789 rc = pf_af_sync_msg(dev, NULL);
791 otx2_err("Failed to send VF FLR mbox msg, rc=%d", rc);
/*
 * VF FLR interrupt handler.  For each pending FLR bit: ack and mask the
 * per-VF interrupt, tell the AF about the VF reset, clear the transaction-
 * pending bit (signals FLR completion to HW), and re-enable the interrupt.
 */
797 otx2_pf_vf_flr_irq(void *param)
799 struct otx2_dev *dev = (struct otx2_dev *)param;
800 uint16_t max_vf = 64, vf;
805 max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
808 otx2_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);
810 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
811 intr = otx2_read64(bar2 + RVU_PF_VFFLR_INTX(i));
815 for (vf = 0; vf < max_vf; vf++) {
816 if (!(intr & (1ULL << vf)))
819 otx2_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d",
820 i, intr, (64 * i + vf));
821 /* Clear interrupt */
822 otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
823 /* Disable the interrupt */
824 otx2_write64(BIT_ULL(vf),
825 bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
826 /* Inform AF about VF reset */
827 vf_flr_send_msg(dev, vf);
829 /* Signal FLR finish */
830 otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
831 /* Enable interrupt */
833 bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
/*
 * Unregister both VF-FLR interrupt vectors after masking the per-word
 * FLR interrupt enables.
 */
839 vf_flr_unregister_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
841 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
844 otx2_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);
/* Mask FLR interrupts before dropping the handlers */
847 for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
848 otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
850 otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
851 RVU_PF_INT_VEC_VFFLR0);
853 otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
854 RVU_PF_INT_VEC_VFFLR1);
/*
 * Register both VF-FLR interrupt vectors, then clear stale FLR status /
 * transaction-pending bits and enable the HW interrupts.
 */
860 vf_flr_register_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
862 struct rte_intr_handle *handle = &pci_dev->intr_handle;
865 otx2_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);
867 rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
868 RVU_PF_INT_VEC_VFFLR0);
870 otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);
872 rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
873 RVU_PF_INT_VEC_VFFLR1);
875 otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);
877 /* Enable HW interrupt */
878 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
879 otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
880 otx2_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
881 otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
888 * Get number of active VFs for the given PF device.
/* Popcount of the active-VF bitmap filled in by vf_pf_process_msgs(). */
891 otx2_dev_active_vfs(void *otx2_dev)
893 struct otx2_dev *dev = otx2_dev;
896 for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
897 count += __builtin_popcount(dev->active_vfs[i]);
/*
 * Set OTX2_HWCAP_F_VF on dev->hwcap when the PCI device id is one of the
 * known OCTEON TX2 VF ids; PF ids leave the capability clear.
 */
903 otx2_update_vf_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
905 switch (pci_dev->id.device_id) {
906 case PCI_DEVID_OCTEONTX2_RVU_PF:
908 case PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF:
909 case PCI_DEVID_OCTEONTX2_RVU_NPA_VF:
910 case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
911 case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
912 case PCI_DEVID_OCTEONTX2_RVU_VF:
913 case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
914 dev->hwcap |= OTX2_HWCAP_F_VF;
921 * Initialize the otx2 device
/*
 * Device bring-up: map BARs, create the local and UP mailboxes toward
 * PF/AF (direction depends on whether we are a VF), register mailbox
 * IRQs, exchange READY to learn our pcifunc, and — on a PF with VFs —
 * map the VF mailbox region, create the PF<->VF mailboxes and register
 * VF-FLR IRQs.  Error paths (labels at the end) unwind in reverse order.
 */
924 otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev)
926 int up_direction = MBOX_DIR_PFAF_UP;
927 int rc, direction = MBOX_DIR_PFAF;
928 uint64_t intr_offset = RVU_PF_INT;
929 struct otx2_dev *dev = otx2_dev;
930 uintptr_t bar2, bar4;
934 bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
935 bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
937 if (bar2 == 0 || bar4 == 0) {
938 otx2_err("Failed to get pci bars");
943 dev->node = pci_dev->device.numa_node;
944 dev->maxvf = pci_dev->max_vfs;
948 otx2_update_vf_hwcap(pci_dev, dev);
/* VFs talk to their PF instead of the AF; adjust directions/IRQ offset */
950 if (otx2_dev_is_vf(dev)) {
951 direction = MBOX_DIR_VFPF;
952 up_direction = MBOX_DIR_VFPF_UP;
953 intr_offset = RVU_VF_INT;
956 /* Initialize the local mbox */
957 rc = otx2_mbox_init(&dev->mbox_local, bar4, bar2, direction, 1,
961 dev->mbox = &dev->mbox_local;
963 rc = otx2_mbox_init(&dev->mbox_up, bar4, bar2, up_direction, 1,
968 /* Register mbox interrupts */
969 rc = mbox_register_irq(pci_dev, dev);
973 /* Check the readiness of PF/VF */
974 rc = otx2_send_ready_msg(dev->mbox, &dev->pf_func);
976 goto mbox_unregister;
/* pf_func learned from the READY exchange identifies us from now on */
978 dev->pf = otx2_get_pf(dev->pf_func);
979 dev->vf = otx2_get_vf(dev->pf_func);
980 memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));
982 /* Found VF devices in a PF device */
983 if (pci_dev->max_vfs > 0) {
985 /* Remap mbox area for all vf's */
986 bar4_addr = otx2_read64(bar2 + RVU_PF_VF_BAR4_ADDR);
987 if (bar4_addr == 0) {
992 hwbase = mbox_mem_map(bar4_addr, MBOX_SIZE * pci_dev->max_vfs);
993 if (hwbase == MAP_FAILED) {
997 /* Init mbox object */
998 rc = otx2_mbox_init(&dev->mbox_vfpf, (uintptr_t)hwbase,
999 bar2, MBOX_DIR_PFVF, pci_dev->max_vfs,
1004 /* PF -> VF UP messages */
1005 rc = otx2_mbox_init(&dev->mbox_vfpf_up, (uintptr_t)hwbase,
1006 bar2, MBOX_DIR_PFVF_UP, pci_dev->max_vfs,
1012 /* Register VF-FLR irq handlers */
1013 if (otx2_dev_is_pf(dev)) {
1014 rc = vf_flr_register_irqs(pci_dev, dev);
1018 dev->mbox_active = 1;
/* Error unwind labels follow (partial in this excerpt) */
1022 mbox_mem_unmap(hwbase, MBOX_SIZE * pci_dev->max_vfs);
1024 mbox_unregister_irq(pci_dev, dev);
1026 otx2_mbox_fini(dev->mbox);
1027 otx2_mbox_fini(&dev->mbox_up);
1034 * Finalize the otx2 device
/*
 * Device teardown: drop intra-dev references, unregister mailbox and
 * (PF-only) VF-FLR IRQs, unmap/finalize the PF<->VF mailboxes, finalize
 * the PF<->AF mailboxes, and disable the MSI-X vectors.
 */
1037 otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev)
1039 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1040 struct otx2_dev *dev = otx2_dev;
1041 struct otx2_idev_cfg *idev;
1042 struct otx2_mbox *mbox;
1044 /* Clear references to this pci dev */
1045 idev = otx2_intra_dev_get_cfg();
1046 if (idev->npa_lf && idev->npa_lf->pci_dev == pci_dev)
1047 idev->npa_lf = NULL;
1049 mbox_unregister_irq(pci_dev, dev);
1051 if (otx2_dev_is_pf(dev))
1052 vf_flr_unregister_irqs(pci_dev, dev);
1053 /* Release PF - VF */
1054 mbox = &dev->mbox_vfpf;
1055 if (mbox->hwbase && mbox->dev)
1056 mbox_mem_unmap((void *)mbox->hwbase,
1057 MBOX_SIZE * pci_dev->max_vfs);
1058 otx2_mbox_fini(mbox);
1059 mbox = &dev->mbox_vfpf_up;
1060 otx2_mbox_fini(mbox);
1062 /* Release PF - AF */
1064 otx2_mbox_fini(mbox);
1065 mbox = &dev->mbox_up;
1066 otx2_mbox_fini(mbox);
1067 dev->mbox_active = 0;
1069 /* Disable MSIX vectors */
1070 otx2_disable_irqs(intr_handle);