1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
10 #include <rte_alarm.h>
11 #include <rte_common.h>
13 #include <rte_memcpy.h>
14 #include <rte_eal_paging.h>
17 #include "otx2_mbox.h"
19 #define RVU_MAX_VF 64 /* RVU_PF_VFPF_MBOX_INT(0..1) */
20 #define RVU_MAX_INT_RETRY 3
22 /* PF/VF message handling timer */
23 #define VF_PF_MBOX_TIMER_MS (20 * 1000)
/* Map 'size' bytes of physical memory at offset 'off' through /dev/mem;
 * returns the mapped VA or MAP_FAILED. Used to remap the VF mailbox BAR
 * region for all VFs of a PF.
 * NOTE(review): this extract elides lines (return type, fd validity check,
 * close()/error paths) — confirm against the full file.
 */
26 mbox_mem_map(off_t off, size_t size)
28 void *va = MAP_FAILED;
34 mem_fd = open("/dev/mem", O_RDWR);
38 va = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
39 RTE_MAP_SHARED, mem_fd, off);
43 otx2_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd",
44 size, mem_fd, (intmax_t)off);
/* Unmap a region previously mapped by mbox_mem_map(). */
50 mbox_mem_unmap(void *va, size_t size)
53 rte_mem_unmap(va, size);
/* Send the queued PF->AF mailbox message and busy-poll RVU_PF_INT bit 0
 * for the AF response, with PF interrupts masked (this runs in interrupt
 * context). On completion *rsp (if requested) points at the first
 * response message header.
 * NOTE(review): the do/sleep/timeout loop body and return paths are
 * elided in this extract — confirm against the full file.
 */
57 pf_af_sync_msg(struct otx2_dev *dev, struct mbox_msghdr **rsp)
59 uint32_t timeout = 0, sleep = 1; struct otx2_mbox *mbox = dev->mbox;
60 struct otx2_mbox_dev *mdev = &mbox->dev[0];
61 volatile uint64_t int_status;
62 struct mbox_msghdr *msghdr;
66 /* We need to disable PF interrupts. We are in timer interrupt */
67 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
70 otx2_mbox_msg_send(mbox, 0);
75 if (timeout >= MBOX_RSP_TIMEOUT) {
76 otx2_err("Message timeout: %dms", MBOX_RSP_TIMEOUT);
/* Poll the AF->PF mailbox interrupt status bit */
80 int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
81 } while ((int_status & 0x1) != 0x1);
/* Ack (W1C) exactly the status bits that were observed */
84 otx2_write64(int_status, dev->bar2 + RVU_PF_INT);
86 /* Enable interrupts */
87 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
/* First response message starts right after the aligned mbox header */
91 off = mbox->rx_start +
92 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
93 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
/* Wait for the AF responses to 'num_msg' messages that were routed from
 * VF 'vf' through this PF, then copy each AF response into the PF<->VF
 * mailbox for that VF. Returns the number of messages received from AF.
 * Runs with PF interrupts masked while polling RVU_PF_INT bit 0.
 * NOTE(review): loop/timeout bodies and some closing braces are elided
 * in this extract — confirm against the full file.
 */
103 af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)
105 uint32_t timeout = 0, sleep = 1; struct otx2_mbox *mbox = dev->mbox;
106 struct otx2_mbox_dev *mdev = &mbox->dev[0];
107 volatile uint64_t int_status;
108 struct mbox_hdr *req_hdr;
109 struct mbox_msghdr *msg;
110 struct mbox_msghdr *rsp;
115 /* We need to disable PF interrupts. We are in timer interrupt */
116 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
119 otx2_mbox_msg_send(mbox, 0);
124 if (timeout >= MBOX_RSP_TIMEOUT) {
125 otx2_err("Routed messages %d timeout: %dms",
126 num_msg, MBOX_RSP_TIMEOUT);
129 int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
130 } while ((int_status & 0x1) != 0x1);
/* Clear the whole interrupt status (W1C) before re-enabling */
133 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
135 /* Enable interrupts */
136 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
138 rte_spinlock_lock(&mdev->mbox_lock);
140 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
141 if (req_hdr->num_msgs != num_msg)
142 otx2_err("Routed messages: %d received: %d", num_msg,
145 /* Get messages from mbox */
146 offset = mbox->rx_start +
147 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
148 for (i = 0; i < req_hdr->num_msgs; i++) {
149 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
150 size = mbox->rx_start + msg->next_msgoff - offset;
152 /* Reserve PF/VF mbox message */
153 size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
154 rsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
155 otx2_mbox_rsp_init(msg->id, rsp);
157 /* Copy message from AF<->PF mbox to PF<->VF mbox */
158 otx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
159 (uint8_t *)msg + sizeof(struct mbox_msghdr),
160 size - sizeof(struct mbox_msghdr));
162 /* Set status and sender pf_func data */
164 rsp->pcifunc = msg->pcifunc;
166 /* Whenever a PF comes up, AF sends the link status to it but
167 * when VF comes up no such event is sent to respective VF.
168 * Using MBOX_MSG_NIX_LF_START_RX response from AF for the
169 * purpose and send the link status of PF to VF.
171 if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
172 /* Send link status to VF */
173 struct cgx_link_user_info linfo;
174 struct mbox_msghdr *vf_msg;
177 /* Get the link status */
178 if (dev->ops && dev->ops->link_status_get)
179 dev->ops->link_status_get(dev, &linfo);
181 sz = RTE_ALIGN(otx2_mbox_id2size(
182 MBOX_MSG_CGX_LINK_EVENT), MBOX_MSG_ALIGN);
183 /* Prepare the message to be sent */
184 vf_msg = otx2_mbox_alloc_msg(&dev->mbox_vfpf_up, vf,
186 otx2_mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
187 memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
188 &linfo, sizeof(struct cgx_link_user_info));
190 vf_msg->rc = msg->rc;
191 vf_msg->pcifunc = msg->pcifunc;
/* Deliver the synthesized link event over the PF->VF UP mailbox */
193 otx2_mbox_msg_send(&dev->mbox_vfpf_up, vf);
195 offset = mbox->rx_start + msg->next_msgoff;
197 rte_spinlock_unlock(&mdev->mbox_lock);
199 return req_hdr->num_msgs;
/* Process all pending requests from VF 'vf' in the VF->PF mailbox:
 * MBOX_MSG_READY is answered locally (marking the VF active); every
 * other message is stamped with the VF's pcifunc and routed to the AF,
 * after which af_pf_wait_msg() copies the AF responses back and the
 * accumulated replies are sent to the VF.
 * NOTE(review): braces, 'else' keyword and some declarations are elided
 * in this extract — confirm control flow against the full file.
 */
203 vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)
205 int offset, routed = 0; struct otx2_mbox *mbox = &dev->mbox_vfpf;
206 struct otx2_mbox_dev *mdev = &mbox->dev[vf];
207 struct mbox_hdr *req_hdr;
208 struct mbox_msghdr *msg;
212 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
213 if (!req_hdr->num_msgs)
216 offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
218 for (i = 0; i < req_hdr->num_msgs; i++) {
220 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
221 size = mbox->rx_start + msg->next_msgoff - offset;
/* Stamp the true sender (pf,vf) before routing upward */
224 msg->pcifunc = otx2_pfvf_func(dev->pf, vf);
226 if (msg->id == MBOX_MSG_READY) {
227 struct ready_msg_rsp *rsp;
228 uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
230 /* Handle READY message in PF */
231 dev->active_vfs[vf / max_bits] |=
232 BIT_ULL(vf % max_bits);
233 rsp = (struct ready_msg_rsp *)
234 otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp));
235 otx2_mbox_rsp_init(msg->id, rsp);
237 /* PF/VF function ID */
238 rsp->hdr.pcifunc = msg->pcifunc;
241 struct mbox_msghdr *af_req;
242 /* Reserve AF/PF mbox message */
243 size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
244 af_req = otx2_mbox_alloc_msg(dev->mbox, 0, size);
245 otx2_mbox_req_init(msg->id, af_req);
247 /* Copy message from VF<->PF mbox to PF<->AF mbox */
248 otx2_mbox_memcpy((uint8_t *)af_req +
249 sizeof(struct mbox_msghdr),
250 (uint8_t *)msg + sizeof(struct mbox_msghdr),
251 size - sizeof(struct mbox_msghdr));
252 af_req->pcifunc = msg->pcifunc;
255 offset = mbox->rx_start + msg->next_msgoff;
259 otx2_base_dbg("pf:%d routed %d messages from vf:%d to AF",
260 dev->pf, routed, vf);
/* Block until AF answers, mirror responses into the VF mailbox */
261 af_pf_wait_msg(dev, vf, routed);
262 otx2_mbox_reset(dev->mbox, 0);
265 /* Send mbox responses to VF */
266 if (mdev->num_msgs) {
267 otx2_base_dbg("pf:%d reply %d messages to vf:%d",
268 dev->pf, mdev->num_msgs, vf);
269 otx2_mbox_msg_send(mbox, vf);
276 vf_pf_process_up_msgs(struct otx2_dev *dev, uint16_t vf)
278 struct otx2_mbox *mbox = &dev->mbox_vfpf_up;
279 struct otx2_mbox_dev *mdev = &mbox->dev[vf];
280 struct mbox_hdr *req_hdr;
281 struct mbox_msghdr *msg;
286 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
287 if (req_hdr->num_msgs == 0)
290 offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
292 for (i = 0; i < req_hdr->num_msgs; i++) {
293 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
297 msg->pcifunc = otx2_pfvf_func(dev->pf, vf);
300 case MBOX_MSG_CGX_LINK_EVENT:
301 otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
302 msg->id, otx2_mbox_id2name(msg->id),
303 msg->pcifunc, otx2_get_pf(msg->pcifunc),
304 otx2_get_vf(msg->pcifunc));
306 case MBOX_MSG_CGX_PTP_RX_INFO:
307 otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
308 msg->id, otx2_mbox_id2name(msg->id),
309 msg->pcifunc, otx2_get_pf(msg->pcifunc),
310 otx2_get_vf(msg->pcifunc));
313 otx2_err("Not handled UP msg 0x%x (%s) func:0x%x",
314 msg->id, otx2_mbox_id2name(msg->id),
317 offset = mbox->rx_start + msg->next_msgoff;
319 otx2_mbox_reset(mbox, vf);
320 mdev->msgs_acked = msgs_acked;
327 otx2_vf_pf_mbox_handle_msg(void *param)
329 uint16_t vf, max_vf, max_bits;
330 struct otx2_dev *dev = param;
332 max_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);
333 max_vf = max_bits * MAX_VFPF_DWORD_BITS;
335 for (vf = 0; vf < max_vf; vf++) {
336 if (dev->intr.bits[vf/max_bits] & BIT_ULL(vf%max_bits)) {
337 otx2_base_dbg("Process vf:%d request (pf:%d, vf:%d)",
338 vf, dev->pf, dev->vf);
339 vf_pf_process_msgs(dev, vf);
341 vf_pf_process_up_msgs(dev, vf);
342 dev->intr.bits[vf/max_bits] &= ~(BIT_ULL(vf%max_bits));
/* VF->PF mailbox interrupt handler: latch the per-VF interrupt bits into
 * dev->intr.bits, ack them in hardware (W1C), and arm a one-shot alarm
 * that services the messages outside interrupt context.
 * NOTE(review): loop-continue, timer_set assignment and braces are
 * elided in this extract — confirm against the full file.
 */
349 otx2_vf_pf_mbox_irq(void *param)
351 struct otx2_dev *dev = param;
352 bool alarm_set = false;
356 for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
357 intr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
361 otx2_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
362 vfpf, intr, dev->pf, dev->vf);
364 /* Save and clear intr bits */
365 dev->intr.bits[vfpf] |= intr;
366 otx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
/* Defer message handling to alarm context; arm only once */
370 if (!dev->timer_set && alarm_set) {
372 /* Start timer to handle messages */
373 rte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,
374 otx2_vf_pf_mbox_handle_msg, dev);
/* Process AF->PF (or PF->VF on a VF) response messages in 'mbox':
 * handle the ids known here (e.g. READY, which yields our pf_func), log
 * errors for failed responses, then reset the mailbox and publish
 * msgs_acked for any synchronous waiter.
 * NOTE(review): the switch statement and some branch lines are elided
 * in this extract — confirm against the full file.
 */
379 otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)
381 struct otx2_mbox_dev *mdev = &mbox->dev[0];
382 struct mbox_hdr *req_hdr;
383 struct mbox_msghdr *msg;
388 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
389 if (req_hdr->num_msgs == 0)
392 offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
393 for (i = 0; i < req_hdr->num_msgs; i++) {
394 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
397 otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
398 msg->id, otx2_mbox_id2name(msg->id),
399 otx2_get_pf(msg->pcifunc),
400 otx2_get_vf(msg->pcifunc));
403 /* Add message id's that are handled here */
405 /* Get our identity */
406 dev->pf_func = msg->pcifunc;
411 otx2_err("Message (%s) response has err=%d",
412 otx2_mbox_id2name(msg->id), msg->rc);
415 offset = mbox->rx_start + msg->next_msgoff;
418 otx2_mbox_reset(mbox, 0);
419 /* Update acked if someone is waiting a message */
420 mdev->msgs_acked = msgs_acked;
424 /* Copies the message received from AF and sends it to VF */
426 pf_vf_mbox_send_up_msg(struct otx2_dev *dev, void *rec_msg)
428 uint16_t max_bits = sizeof(dev->active_vfs[0]) * sizeof(uint64_t);
429 struct otx2_mbox *vf_mbox = &dev->mbox_vfpf_up;
430 struct msg_req *msg = rec_msg;
431 struct mbox_msghdr *vf_msg;
435 size = RTE_ALIGN(otx2_mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
436 /* Send UP message to all VF's */
437 for (vf = 0; vf < vf_mbox->ndevs; vf++) {
439 if (!(dev->active_vfs[vf / max_bits] & (BIT_ULL(vf))))
442 otx2_base_dbg("(%s) size: %zx to VF: %d",
443 otx2_mbox_id2name(msg->hdr.id), size, vf);
445 /* Reserve PF/VF mbox message */
446 vf_msg = otx2_mbox_alloc_msg(vf_mbox, vf, size);
448 otx2_err("Failed to alloc VF%d UP message", vf);
451 otx2_mbox_req_init(msg->hdr.id, vf_msg);
454 * Copy message from AF<->PF UP mbox
457 otx2_mbox_memcpy((uint8_t *)vf_msg +
458 sizeof(struct mbox_msghdr), (uint8_t *)msg
459 + sizeof(struct mbox_msghdr), size -
460 sizeof(struct mbox_msghdr));
462 vf_msg->rc = msg->hdr.rc;
463 /* Set PF to be a sender */
464 vf_msg->pcifunc = dev->pf_func;
467 otx2_mbox_msg_send(vf_mbox, vf);
/* UP-message handler for MBOX_MSG_CGX_LINK_EVENT: on a PF, notify the
 * local driver via ops->link_status_update and forward the event to all
 * active VFs; on a VF, just notify the local driver.
 * NOTE(review): the response parameter, braces and return are elided in
 * this extract — confirm against the full file.
 */
472 otx2_mbox_up_handler_cgx_link_event(struct otx2_dev *dev,
473 struct cgx_link_info_msg *msg,
476 struct cgx_link_user_info *linfo = &msg->link_info;
478 otx2_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
479 otx2_get_pf(dev->pf_func), otx2_get_vf(dev->pf_func),
480 linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
481 otx2_mbox_id2name(msg->hdr.id),
482 otx2_get_pf(msg->hdr.pcifunc),
483 otx2_get_vf(msg->hdr.pcifunc));
485 /* PF gets link notification from AF */
486 if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
487 if (dev->ops && dev->ops->link_status_update)
488 dev->ops->link_status_update(dev, linfo);
490 /* Forward the same message as received from AF to VF */
491 pf_vf_mbox_send_up_msg(dev, msg);
493 /* VF gets link up notification */
494 if (dev->ops && dev->ops->link_status_update)
495 dev->ops->link_status_update(dev, linfo);
/* UP-message handler for MBOX_MSG_CGX_PTP_RX_INFO: on a PF, propagate
 * the PTP enable/disable state via ops->ptp_info_update and forward the
 * message to all active VFs; on a VF, just notify the local driver.
 * NOTE(review): the response parameter, braces and return are elided in
 * this extract — confirm against the full file.
 */
503 otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_dev *dev,
504 struct cgx_ptp_rx_info_msg *msg,
507 otx2_nix_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
508 otx2_get_pf(dev->pf_func),
509 otx2_get_vf(dev->pf_func),
510 msg->ptp_en ? "ENABLED" : "DISABLED",
511 msg->hdr.id, otx2_mbox_id2name(msg->hdr.id),
512 otx2_get_pf(msg->hdr.pcifunc),
513 otx2_get_vf(msg->hdr.pcifunc));
515 /* PF gets PTP notification from AF */
516 if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
517 if (dev->ops && dev->ops->ptp_info_update)
518 dev->ops->ptp_info_update(dev, msg->ptp_en);
520 /* Forward the same message as received from AF to VF */
521 pf_vf_mbox_send_up_msg(dev, msg);
523 /* VF gets PTP notification */
524 if (dev->ops && dev->ops->ptp_info_update)
525 dev->ops->ptp_info_update(dev, msg->ptp_en);
/* Validate and dispatch one UP request: the M() macro, expanded over the
 * MBOX_UP_CGX_MESSAGES list (expansion elided in this extract), allocates
 * the typed response, fills sig/pcifunc and calls the matching
 * otx2_mbox_up_handler_*(). Bad signatures and unknown ids fall through
 * to an invalid-msg reply.
 * NOTE(review): the macro body below uses backslash continuations and is
 * partially elided, so no comments are inserted inside it.
 */
533 mbox_process_msgs_up(struct otx2_dev *dev, struct mbox_msghdr *req)
535 /* Check if valid, if not reply with a invalid msg */
536 if (req->sig != OTX2_MBOX_REQ_SIG)
540 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
542 struct _rsp_type *rsp; \
545 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
547 sizeof(struct _rsp_type)); \
552 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
553 rsp->hdr.pcifunc = dev->pf_func; \
556 err = otx2_mbox_up_handler_ ## _fn_name( \
557 dev, (struct _req_type *)req, rsp); \
564 otx2_reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
/* Iterate all pending UP messages in 'mbox', dispatch each through
 * mbox_process_msgs_up(), log handler errors, then send the accumulated
 * responses back in one go.
 */
571 otx2_process_msgs_up(struct otx2_dev *dev, struct otx2_mbox *mbox)
573 struct otx2_mbox_dev *mdev = &mbox->dev[0];
574 struct mbox_hdr *req_hdr;
575 struct mbox_msghdr *msg;
578 req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
579 if (req_hdr->num_msgs == 0)
582 offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
583 for (i = 0; i < req_hdr->num_msgs; i++) {
584 msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
586 otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
587 msg->id, otx2_mbox_id2name(msg->id),
588 otx2_get_pf(msg->pcifunc),
589 otx2_get_vf(msg->pcifunc));
590 err = mbox_process_msgs_up(dev, msg);
592 otx2_err("Error %d handling 0x%x (%s)",
593 err, msg->id, otx2_mbox_id2name(msg->id));
594 offset = mbox->rx_start + msg->next_msgoff;
596 /* Send mbox responses */
597 if (mdev->num_msgs) {
598 otx2_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
599 otx2_mbox_msg_send(mbox, 0);
/* PF->VF mailbox interrupt handler (runs on a VF): ack RVU_VF_INT (W1C),
 * then process configuration responses followed by UP messages.
 */
604 otx2_pf_vf_mbox_irq(void *param)
606 struct otx2_dev *dev = param;
609 intr = otx2_read64(dev->bar2 + RVU_VF_INT);
611 otx2_base_dbg("Proceeding to check mbox UP messages if any");
613 otx2_write64(intr, dev->bar2 + RVU_VF_INT);
614 otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
616 /* First process all configuration messages */
617 otx2_process_msgs(dev, dev->mbox);
619 /* Process Uplink messages */
620 otx2_process_msgs_up(dev, &dev->mbox_up);
624 otx2_af_pf_mbox_irq(void *param)
626 struct otx2_dev *dev = param;
629 intr = otx2_read64(dev->bar2 + RVU_PF_INT);
631 otx2_base_dbg("Proceeding to check mbox UP messages if any");
633 otx2_write64(intr, dev->bar2 + RVU_PF_INT);
634 otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
636 /* First process all configuration messages */
637 otx2_process_msgs(dev, dev->mbox);
639 /* Process Uplink messages */
640 otx2_process_msgs_up(dev, &dev->mbox_up);
/* Register all PF-side mailbox interrupts: two VF<->PF vectors (VF 0-63
 * and VF 64-127) plus the AF<->PF vector. HW interrupts are disabled
 * while registering and re-enabled (plus a final status clear) at the
 * end. Returns the otx2_register_irq() rc on failure.
 * NOTE(review): 'return rc' lines and braces are elided in this extract.
 */
644 mbox_register_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
646 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
/* Mask all VF<->PF and AF<->PF mbox interrupts during setup */
650 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
651 otx2_write64(~0ull, dev->bar2 +
652 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
654 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
658 /* MBOX interrupt for VF(0...63) <-> PF */
659 rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
660 RVU_PF_INT_VEC_VFPF_MBOX0);
663 otx2_err("Fail to register PF(VF0-63) mbox irq");
666 /* MBOX interrupt for VF(64...128) <-> PF */
667 rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
668 RVU_PF_INT_VEC_VFPF_MBOX1);
671 otx2_err("Fail to register PF(VF64-128) mbox irq");
674 /* MBOX interrupt AF <-> PF */
675 rc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,
676 dev, RVU_PF_INT_VEC_AFPF_MBOX);
678 otx2_err("Fail to register AF<->PF mbox irq");
/* Re-enable HW interrupts now that handlers are in place */
683 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
684 otx2_write64(~0ull, dev->bar2 +
685 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));
687 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
688 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
/* Register the single PF<->VF mailbox interrupt on a VF; disable the HW
 * interrupt while registering and re-enable (with a status clear) after.
 * NOTE(review): the error return path is elided in this extract.
 */
694 mbox_register_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
696 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
700 otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
702 /* MBOX interrupt PF <-> VF */
703 rc = otx2_register_irq(intr_handle, otx2_pf_vf_mbox_irq,
704 dev, RVU_VF_INT_VEC_MBOX);
706 otx2_err("Fail to register PF<->VF mbox irq");
/* Clear any stale status and enable the HW interrupt */
711 otx2_write64(~0ull, dev->bar2 + RVU_VF_INT);
712 otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);
/* Dispatch mailbox IRQ registration to the VF or PF variant. */
718 mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
720 if (otx2_dev_is_vf(dev))
721 return mbox_register_vf_irq(pci_dev, dev);
723 return mbox_register_pf_irq(pci_dev, dev);
/* Tear down PF-side mailbox interrupts: mask all HW mbox interrupts,
 * cancel the pending VF-message alarm, and unregister the three vectors
 * registered by mbox_register_pf_irq().
 */
727 mbox_unregister_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
729 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
/* Mask HW interrupts first so no handler runs during teardown */
733 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
734 otx2_write64(~0ull, dev->bar2 +
735 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
737 otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
/* Cancel the deferred VF message-handling alarm, if armed */
741 rte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev);
743 /* Unregister the interrupt handler for each vectors */
744 /* MBOX interrupt for VF(0...63) <-> PF */
745 otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
746 RVU_PF_INT_VEC_VFPF_MBOX0);
748 /* MBOX interrupt for VF(64...128) <-> PF */
749 otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
750 RVU_PF_INT_VEC_VFPF_MBOX1);
752 /* MBOX interrupt AF <-> PF */
753 otx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,
754 RVU_PF_INT_VEC_AFPF_MBOX);
/* Tear down the VF-side mailbox interrupt: mask it in HW, then
 * unregister the handler.
 */
759 mbox_unregister_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
761 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
764 otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
766 /* Unregister the interrupt handler */
767 otx2_unregister_irq(intr_handle, otx2_pf_vf_mbox_irq, dev,
768 RVU_VF_INT_VEC_MBOX);
/* Dispatch mailbox IRQ teardown to the VF or PF variant. */
772 mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
774 if (otx2_dev_is_vf(dev))
775 mbox_unregister_vf_irq(pci_dev, dev);
777 mbox_unregister_pf_irq(pci_dev, dev);
/* Inform the AF that VF 'vf' underwent FLR: allocate a vf_flr request,
 * stamp it with the VF's pcifunc, and send it synchronously (we are in
 * interrupt context, so pf_af_sync_msg() polls for the response).
 */
781 vf_flr_send_msg(struct otx2_dev *dev, uint16_t vf)
783 struct otx2_mbox *mbox = dev->mbox;
787 req = otx2_mbox_alloc_msg_vf_flr(mbox);
788 /* Overwrite pcifunc to indicate VF */
789 req->hdr.pcifunc = otx2_pfvf_func(dev->pf, vf);
791 /* Sync message in interrupt context */
792 rc = pf_af_sync_msg(dev, NULL);
794 otx2_err("Failed to send VF FLR mbox msg, rc=%d", rc);
800 otx2_pf_vf_flr_irq(void *param)
802 struct otx2_dev *dev = (struct otx2_dev *)param;
803 uint16_t max_vf = 64, vf;
808 max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
811 otx2_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);
813 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
814 intr = otx2_read64(bar2 + RVU_PF_VFFLR_INTX(i));
818 for (vf = 0; vf < max_vf; vf++) {
819 if (!(intr & (1ULL << vf)))
822 otx2_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d",
823 i, intr, (64 * i + vf));
824 /* Clear interrupt */
825 otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
826 /* Disable the interrupt */
827 otx2_write64(BIT_ULL(vf),
828 bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
829 /* Inform AF about VF reset */
830 vf_flr_send_msg(dev, vf);
832 /* Signal FLR finish */
833 otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
834 /* Enable interrupt */
836 bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
/* Mask both VF-FLR interrupt words in HW and unregister the two FLR
 * interrupt vectors registered by vf_flr_register_irqs().
 */
842 vf_flr_unregister_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
844 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
847 otx2_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);
/* HW interrupt disable before removing the handlers */
850 for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
851 otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
853 otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
854 RVU_PF_INT_VEC_VFFLR0);
856 otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
857 RVU_PF_INT_VEC_VFFLR1);
/* Register the two VF-FLR interrupt vectors and enable the HW FLR
 * interrupts (clearing any stale status and pending-transaction bits).
 * NOTE(review): error-return lines are elided in this extract.
 */
863 vf_flr_register_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
865 struct rte_intr_handle *handle = pci_dev->intr_handle;
868 otx2_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);
870 rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
871 RVU_PF_INT_VEC_VFFLR0);
873 otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);
875 rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
876 RVU_PF_INT_VEC_VFFLR1);
878 otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);
880 /* Enable HW interrupt */
881 for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
882 otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
883 otx2_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
884 otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
/*
 * Get number of active VFs for the given PF device.
 * Counts the set bits across the active_vfs bitmap (bits are set when a
 * VF sends MBOX_MSG_READY in vf_pf_process_msgs()).
 */
894 otx2_dev_active_vfs(void *otx2_dev)
896 struct otx2_dev *dev = otx2_dev;
899 for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
900 count += __builtin_popcount(dev->active_vfs[i]);
/* Set OTX2_HWCAP_F_VF in dev->hwcap when the PCI device id identifies a
 * VF variant; PF ids leave the flag clear.
 * NOTE(review): 'break's and the default case are elided in this
 * extract — confirm fallthrough behavior against the full file.
 */
906 otx2_update_vf_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
908 switch (pci_dev->id.device_id) {
909 case PCI_DEVID_OCTEONTX2_RVU_PF:
911 case PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF:
912 case PCI_DEVID_OCTEONTX2_RVU_NPA_VF:
913 case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
914 case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
915 case PCI_DEVID_OCTEONTX2_RVU_VF:
916 case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
917 dev->hwcap |= OTX2_HWCAP_F_VF;
/*
 * Initialize the otx2 device
 * Sets up BAR mappings, local (AF<->PF or PF<->VF) and UP mailboxes,
 * registers mailbox interrupts, performs the READY handshake to learn
 * pf_func, and — on a PF with VFs — remaps the VF mailbox region and
 * initializes the PF<->VF mailboxes plus VF-FLR interrupts.
 * NOTE(review): goto labels, intermediate 'goto' statements and some
 * declarations (hwbase, bar4_addr) are elided in this extract; the
 * cleanup lines near the end belong to error-unwind labels.
 */
927 otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev)
929 int up_direction = MBOX_DIR_PFAF_UP;
930 int rc, direction = MBOX_DIR_PFAF;
931 uint64_t intr_offset = RVU_PF_INT;
932 struct otx2_dev *dev = otx2_dev;
933 uintptr_t bar2, bar4;
937 bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
938 bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
940 if (bar2 == 0 || bar4 == 0) {
941 otx2_err("Failed to get pci bars");
946 dev->node = pci_dev->device.numa_node;
947 dev->maxvf = pci_dev->max_vfs;
951 otx2_update_vf_hwcap(pci_dev, dev);
/* On a VF the mailbox directions and interrupt offset differ */
953 if (otx2_dev_is_vf(dev)) {
954 direction = MBOX_DIR_VFPF;
955 up_direction = MBOX_DIR_VFPF_UP;
956 intr_offset = RVU_VF_INT;
959 /* Initialize the local mbox */
960 rc = otx2_mbox_init(&dev->mbox_local, bar4, bar2, direction, 1,
964 dev->mbox = &dev->mbox_local;
966 rc = otx2_mbox_init(&dev->mbox_up, bar4, bar2, up_direction, 1,
971 /* Register mbox interrupts */
972 rc = mbox_register_irq(pci_dev, dev);
976 /* Check the readiness of PF/VF */
977 rc = otx2_send_ready_msg(dev->mbox, &dev->pf_func);
979 goto mbox_unregister;
981 dev->pf = otx2_get_pf(dev->pf_func);
982 dev->vf = otx2_get_vf(dev->pf_func);
983 memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));
985 /* Found VF devices in a PF device */
986 if (pci_dev->max_vfs > 0) {
988 /* Remap mbox area for all vf's */
989 bar4_addr = otx2_read64(bar2 + RVU_PF_VF_BAR4_ADDR);
990 if (bar4_addr == 0) {
995 hwbase = mbox_mem_map(bar4_addr, MBOX_SIZE * pci_dev->max_vfs);
996 if (hwbase == MAP_FAILED) {
1000 /* Init mbox object */
1001 rc = otx2_mbox_init(&dev->mbox_vfpf, (uintptr_t)hwbase,
1002 bar2, MBOX_DIR_PFVF, pci_dev->max_vfs,
1007 /* PF -> VF UP messages */
1008 rc = otx2_mbox_init(&dev->mbox_vfpf_up, (uintptr_t)hwbase,
1009 bar2, MBOX_DIR_PFVF_UP, pci_dev->max_vfs,
1015 /* Register VF-FLR irq handlers */
1016 if (otx2_dev_is_pf(dev)) {
1017 rc = vf_flr_register_irqs(pci_dev, dev);
1021 dev->mbox_active = 1;
/* Error-unwind cleanup (labels elided in this extract) */
1025 mbox_mem_unmap(hwbase, MBOX_SIZE * pci_dev->max_vfs);
1027 mbox_unregister_irq(pci_dev, dev);
1029 otx2_mbox_fini(dev->mbox);
1030 otx2_mbox_fini(&dev->mbox_up);
/*
 * Finalize the otx2 device
 * Reverse of otx2_dev_priv_init(): drop intra-device references,
 * unregister mailbox and (on PF) VF-FLR interrupts, unmap and finalize
 * all mailboxes, and disable MSI-X vectors.
 * NOTE(review): this extract elides lines (e.g. the 'mbox = dev->mbox;'
 * assignment before the PF-AF fini) and the function may continue past
 * the visible end — confirm against the full file.
 */
1040 otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev)
1042 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1043 struct otx2_dev *dev = otx2_dev;
1044 struct otx2_idev_cfg *idev;
1045 struct otx2_mbox *mbox;
1047 /* Clear references to this pci dev */
1048 idev = otx2_intra_dev_get_cfg();
1049 if (idev->npa_lf && idev->npa_lf->pci_dev == pci_dev)
1050 idev->npa_lf = NULL;
1052 mbox_unregister_irq(pci_dev, dev);
1054 if (otx2_dev_is_pf(dev))
1055 vf_flr_unregister_irqs(pci_dev, dev);
1056 /* Release PF - VF */
1057 mbox = &dev->mbox_vfpf;
1058 if (mbox->hwbase && mbox->dev)
1059 mbox_mem_unmap((void *)mbox->hwbase,
1060 MBOX_SIZE * pci_dev->max_vfs);
1061 otx2_mbox_fini(mbox);
1062 mbox = &dev->mbox_vfpf_up;
1063 otx2_mbox_fini(mbox);
1065 /* Release PF - AF */
1067 otx2_mbox_fini(mbox);
1068 mbox = &dev->mbox_up;
1069 otx2_mbox_fini(mbox);
1070 dev->mbox_active = 0;
1072 /* Disable MSIX vectors */
1073 otx2_disable_irqs(intr_handle);