/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <fcntl.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <unistd.h>

#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_memcpy.h>

#include "otx2_dev.h"
#include "otx2_mbox.h"

#define RVU_MAX_VF		64 /* RVU_PF_VFPF_MBOX_INT(0..1) */
#define RVU_MAX_INT_RETRY	3

/* PF/VF message handling timer */
#define VF_PF_MBOX_TIMER_MS	(20 * 1000)
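
/*
 * Map a physical mailbox region (the VF mailbox area exposed through
 * RVU_PF_VF_BAR4_ADDR) into this process via /dev/mem.
 */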
static void *
mbox_mem_map(off_t off, size_t size)
{
	void *va = MAP_FAILED;
	int mem_fd;

	mem_fd = open("/dev/mem", O_RDWR);
	if (mem_fd < 0)
		goto error;

	va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, off);
	close(mem_fd);

	if (va == MAP_FAILED)
		otx2_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd",
			 size, mem_fd, (intmax_t)off);
error:
	return va;
}

static void
mbox_mem_unmap(void *va, size_t size)
{
	if (va)
		munmap(va, size);
}
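
/*
 * Send the messages that were routed from a VF to the AF mailbox, poll
 * RVU_PF_INT for the AF response (PF interrupts are disabled because this
 * runs from the alarm handler) and copy every response back into the
 * PF<->VF mailbox of the requesting VF.
 */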
static int
af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)
{
	uint32_t timeout = 0, sleep = 1;
	struct otx2_mbox *mbox = dev->mbox;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_msghdr *rsp;
	uint64_t offset;
	size_t size;
	int i;

	/* We need to disable PF interrupts. We are in timer interrupt */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send messages to AF */
	otx2_mbox_msg_send(mbox, 0);

	do {
		rte_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= MBOX_RSP_TIMEOUT) {
			otx2_err("Routed messages %d timeout: %dms",
				 num_msg, MBOX_RSP_TIMEOUT);
			break;
		}
		int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear the interrupt status */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	rte_spinlock_lock(&mdev->mbox_lock);

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs != num_msg)
		otx2_err("Routed messages: %d received: %d", num_msg,
			 req_hdr->num_msgs);

	/* Get messages from mbox */
	offset = mbox->rx_start +
		 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Reserve PF/VF mbox message */
		size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
		rsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
		otx2_mbox_rsp_init(msg->id, rsp);

		/* Copy message from AF<->PF mbox to PF<->VF mbox */
		otx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
				 (uint8_t *)msg + sizeof(struct mbox_msghdr),
				 size - sizeof(struct mbox_msghdr));

		/* Set status and sender pf_func data */
		rsp->rc = msg->rc;
		rsp->pcifunc = msg->pcifunc;

		offset = mbox->rx_start + msg->next_msgoff;
	}
	rte_spinlock_unlock(&mdev->mbox_lock);

	return req_hdr->num_msgs;
}
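
/*
 * Service the pending requests of a single VF: answer MBOX_MSG_READY locally
 * (marking the VF active), route all other requests to the AF and finally
 * send the accumulated responses back to the VF.
 */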
static int
vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)
{
	int offset, routed = 0;
	struct otx2_mbox *mbox = &dev->mbox_vfpf;
	struct otx2_mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	size_t size;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (!req_hdr->num_msgs)
		return 0;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Set which VF sent this message based on mbox IRQ */
		msg->pcifunc = otx2_pfvf_func(dev->pf, vf);

		if (msg->id == MBOX_MSG_READY) {
			struct ready_msg_rsp *rsp;
			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

			/* Handle READY message in PF */
			dev->active_vfs[vf / max_bits] |=
						BIT_ULL(vf % max_bits);
			rsp = (struct ready_msg_rsp *)
			      otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp));
			otx2_mbox_rsp_init(msg->id, rsp);

			/* PF/VF function ID */
			rsp->hdr.pcifunc = msg->pcifunc;
			rsp->hdr.rc = 0;
		} else {
			struct mbox_msghdr *af_req;
			/* Reserve AF/PF mbox message */
			size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
			af_req = otx2_mbox_alloc_msg(dev->mbox, 0, size);
			otx2_mbox_req_init(msg->id, af_req);

			/* Copy message from VF<->PF mbox to PF<->AF mbox */
			otx2_mbox_memcpy((uint8_t *)af_req +
					 sizeof(struct mbox_msghdr),
					 (uint8_t *)msg +
					 sizeof(struct mbox_msghdr),
					 size - sizeof(struct mbox_msghdr));
			af_req->pcifunc = msg->pcifunc;
			routed++;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	if (routed > 0) {
		otx2_base_dbg("pf:%d routed %d messages from vf:%d to AF",
			      dev->pf, routed, vf);
		af_pf_wait_msg(dev, vf, routed);
		otx2_mbox_reset(dev->mbox, 0);
	}

	/* Send mbox responses to VF */
	if (mdev->num_msgs) {
		otx2_base_dbg("pf:%d reply %d messages to vf:%d",
			      dev->pf, mdev->num_msgs, vf);
		otx2_mbox_msg_send(mbox, vf);
	}

	return i;
}
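
/*
 * Consume a VF's replies to UP notifications (CGX link/PTP events) and
 * update msgs_acked so that any waiter sees the acknowledgment.
 */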
static int
vf_pf_process_up_msgs(struct otx2_dev *dev, uint16_t vf)
{
	struct otx2_mbox *mbox = &dev->mbox_vfpf_up;
	struct otx2_mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return 0;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		msg->pcifunc = otx2_pfvf_func(dev->pf, vf);

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				      msg->id, otx2_mbox_id2name(msg->id),
				      msg->pcifunc, otx2_get_pf(msg->pcifunc),
				      otx2_get_vf(msg->pcifunc));
			break;
		case MBOX_MSG_CGX_PTP_RX_INFO:
			otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				      msg->id, otx2_mbox_id2name(msg->id),
				      msg->pcifunc, otx2_get_pf(msg->pcifunc),
				      otx2_get_vf(msg->pcifunc));
			break;
		default:
			otx2_err("Not handled UP msg 0x%x (%s) func:0x%x",
				 msg->id, otx2_mbox_id2name(msg->id),
				 msg->pcifunc);
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}
	otx2_mbox_reset(mbox, vf);
	mdev->msgs_acked = msgs_acked;
	rte_wmb();

	return i;
}
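
/*
 * Alarm callback: walk the interrupt bits saved by otx2_vf_pf_mbox_irq()
 * and service the mailbox of every VF that raised an interrupt.
 */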
static void
otx2_vf_pf_mbox_handle_msg(void *param)
{
	uint16_t vf, max_vf, max_bits;
	struct otx2_dev *dev = param;

	max_bits = sizeof(dev->intr.bits[0]) * 8;
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
			otx2_base_dbg("Process vf:%d request (pf:%d, vf:%d)",
				      vf, dev->pf, dev->vf);
			vf_pf_process_msgs(dev, vf);
			/* UP messages */
			vf_pf_process_up_msgs(dev, vf);
			dev->intr.bits[vf / max_bits] &=
						~(BIT_ULL(vf % max_bits));
		}
	}
	dev->timer_set = 0;
}
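
/*
 * VF->PF mailbox interrupt handler: latch and clear the per-VF interrupt
 * bits, then defer the actual message processing to an EAL alarm.
 */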
static void
otx2_vf_pf_mbox_irq(void *param)
{
	struct otx2_dev *dev = param;
	bool alarm_set = false;
	uint64_t intr;
	int vfpf;

	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
		intr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		if (!intr)
			continue;

		otx2_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
			      vfpf, intr, dev->pf, dev->vf);

		/* Save and clear intr bits */
		dev->intr.bits[vfpf] |= intr;
		otx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		alarm_set = true;
	}

	if (!dev->timer_set && alarm_set) {
		dev->timer_set = 1;
		/* Start timer to handle messages */
		rte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,
				  otx2_vf_pf_mbox_handle_msg, dev);
	}
}
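
/* Process mailbox responses: AF->PF on a PF, or PF->VF when running as VF */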
static void
otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
			      msg->id, otx2_mbox_id2name(msg->id),
			      otx2_get_pf(msg->pcifunc),
			      otx2_get_vf(msg->pcifunc));

		switch (msg->id) {
			/* Add message id's that are handled here */
		case MBOX_MSG_READY:
			/* Get our identity */
			dev->pf_func = msg->pcifunc;
			break;

		default:
			if (msg->rc)
				otx2_err("Message (%s) response has err=%d",
					 otx2_mbox_id2name(msg->id), msg->rc);
			break;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	otx2_mbox_reset(mbox, 0);
	/* Update acked if someone is waiting a message */
	mdev->msgs_acked = msgs_acked;
	rte_wmb();
}

/* Copies the message received from AF and sends it to VF */
static void
pf_vf_mbox_send_up_msg(struct otx2_dev *dev, void *rec_msg)
{
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
	struct otx2_mbox *vf_mbox = &dev->mbox_vfpf_up;
	struct msg_req *msg = rec_msg;
	struct mbox_msghdr *vf_msg;
	size_t size;
	uint16_t vf;

	size = RTE_ALIGN(otx2_mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
	/* Send UP message to all VF's */
	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
		/* Skip VFs that are not active */
		if (!(dev->active_vfs[vf / max_bits] &
		      BIT_ULL(vf % max_bits)))
			continue;

		otx2_base_dbg("(%s) size: %zx to VF: %d",
			      otx2_mbox_id2name(msg->hdr.id), size, vf);

		/* Reserve PF/VF mbox message */
		vf_msg = otx2_mbox_alloc_msg(vf_mbox, vf, size);
		if (!vf_msg) {
			otx2_err("Failed to alloc VF%d UP message", vf);
			continue;
		}
		otx2_mbox_req_init(msg->hdr.id, vf_msg);

		/*
		 * Copy message from AF<->PF UP mbox
		 * to PF<->VF UP mbox
		 */
		otx2_mbox_memcpy((uint8_t *)vf_msg +
				 sizeof(struct mbox_msghdr),
				 (uint8_t *)msg + sizeof(struct mbox_msghdr),
				 size - sizeof(struct mbox_msghdr));

		vf_msg->rc = msg->hdr.rc;
		/* Set PF to be a sender */
		vf_msg->pcifunc = dev->pf_func;

		/* Send to VF */
		otx2_mbox_msg_send(vf_mbox, vf);
	}
}
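
/* UP message handler: CGX link state change notification from the AF */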
int
otx2_mbox_up_handler_cgx_link_event(struct otx2_dev *dev,
				    struct cgx_link_info_msg *msg,
				    struct msg_rsp *rsp)
{
	struct cgx_link_user_info *linfo = &msg->link_info;

	otx2_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
		      otx2_get_pf(dev->pf_func), otx2_get_vf(dev->pf_func),
		      linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
		      otx2_mbox_id2name(msg->hdr.id),
		      otx2_get_pf(msg->hdr.pcifunc),
		      otx2_get_vf(msg->hdr.pcifunc));

	/* PF gets link notification from AF */
	if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(dev, linfo);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets link up notification */
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(dev, linfo);
	}

	rsp->hdr.rc = 0;
	return 0;
}
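
/* UP message handler: CGX PTP Rx timestamping state change from the AF */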
int
otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_dev *dev,
				     struct cgx_ptp_rx_info_msg *msg,
				     struct msg_rsp *rsp)
{
	otx2_nix_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     otx2_get_pf(dev->pf_func),
		     otx2_get_vf(dev->pf_func),
		     msg->ptp_en ? "ENABLED" : "DISABLED",
		     msg->hdr.id, otx2_mbox_id2name(msg->hdr.id),
		     otx2_get_pf(msg->hdr.pcifunc),
		     otx2_get_vf(msg->hdr.pcifunc));

	/* PF gets PTP notification from AF */
	if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(dev, msg->ptp_en);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets PTP notification */
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(dev, msg->ptp_en);
	}

	rsp->hdr.rc = 0;
	return 0;
}
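
/*
 * Dispatch one UP message to its otx2_mbox_up_handler_*() callback,
 * allocating and pre-filling the response header on its behalf.
 */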
static int
mbox_process_msgs_up(struct otx2_dev *dev, struct mbox_msghdr *req)
{
	/* Check if the message is valid; if not, reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		return -EIO;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)		\
	case _id: {						\
		struct _rsp_type *rsp;				\
		int err;					\
								\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(	\
			&dev->mbox_up, 0,			\
			sizeof(struct _rsp_type));		\
		if (!rsp)					\
			return -ENOMEM;				\
								\
		rsp->hdr.id = _id;				\
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
		rsp->hdr.pcifunc = dev->pf_func;		\
		rsp->hdr.rc = 0;				\
								\
		err = otx2_mbox_up_handler_ ## _fn_name(	\
			dev, (struct _req_type *)req, rsp);	\
		return err;					\
	}
MBOX_UP_CGX_MESSAGES
#undef M

	default:
		otx2_reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
	}

	return -ENODEV;
}
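
/* Walk all pending UP messages and send back the generated responses */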
static void
otx2_process_msgs_up(struct otx2_dev *dev, struct otx2_mbox *mbox)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int i, err, offset;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
			      msg->id, otx2_mbox_id2name(msg->id),
			      otx2_get_pf(msg->pcifunc),
			      otx2_get_vf(msg->pcifunc));
		err = mbox_process_msgs_up(dev, msg);
		if (err)
			otx2_err("Error %d handling 0x%x (%s)",
				 err, msg->id, otx2_mbox_id2name(msg->id));
		offset = mbox->rx_start + msg->next_msgoff;
	}
	/* Send mbox responses */
	if (mdev->num_msgs) {
		otx2_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
		otx2_mbox_msg_send(mbox, 0);
	}
}
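
/* Mailbox interrupt handler on the VF side (PF <-> VF channel) */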
static void
otx2_pf_vf_mbox_irq(void *param)
{
	struct otx2_dev *dev = param;
	uint64_t intr;

	intr = otx2_read64(dev->bar2 + RVU_VF_INT);
	if (intr == 0)
		return;

	otx2_write64(intr, dev->bar2 + RVU_VF_INT);
	otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	otx2_process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	otx2_process_msgs_up(dev, &dev->mbox_up);
}
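
/* Mailbox interrupt handler on the PF side (AF <-> PF channel) */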
static void
otx2_af_pf_mbox_irq(void *param)
{
	struct otx2_dev *dev = param;
	uint64_t intr;

	intr = otx2_read64(dev->bar2 + RVU_PF_INT);
	if (intr == 0)
		return;

	otx2_write64(intr, dev->bar2 + RVU_PF_INT);

	otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	otx2_process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	otx2_process_msgs_up(dev, &dev->mbox_up);
}
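
/* Register the AF<->PF and VF<->PF mailbox interrupts and enable them in HW */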
static int
mbox_register_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i, rc;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		otx2_write64(~0ull, dev->bar2 +
			     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	/* MBOX interrupt for VF(0...63) <-> PF */
	rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			       RVU_PF_INT_VEC_VFPF_MBOX0);
	if (rc) {
		otx2_err("Failed to register PF(VF0-63) mbox irq");
		return rc;
	}

	/* MBOX interrupt for VF(64...127) <-> PF */
	rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			       RVU_PF_INT_VEC_VFPF_MBOX1);
	if (rc) {
		otx2_err("Failed to register PF(VF64-127) mbox irq");
		return rc;
	}

	/* MBOX interrupt AF <-> PF */
	rc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,
			       dev, RVU_PF_INT_VEC_AFPF_MBOX);
	if (rc) {
		otx2_err("Failed to register AF<->PF mbox irq");
		return rc;
	}

	/* HW enable intr */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		otx2_write64(~0ull, dev->bar2 +
			     RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int rc;

	/* Clear irq */
	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* MBOX interrupt PF <-> VF */
	rc = otx2_register_irq(intr_handle, otx2_pf_vf_mbox_irq,
			       dev, RVU_VF_INT_VEC_MBOX);
	if (rc) {
		otx2_err("Failed to register PF<->VF mbox irq");
		return rc;
	}

	/* HW enable intr */
	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT);
	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	if (otx2_dev_is_vf(dev))
		return mbox_register_vf_irq(pci_dev, dev);
	else
		return mbox_register_pf_irq(pci_dev, dev);
}
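
/*
 * Disable the PF mailbox interrupts in HW, cancel any pending message
 * handling alarm and unregister the interrupt handlers.
 */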
static void
mbox_unregister_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		otx2_write64(~0ull, dev->bar2 +
			     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	rte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev);

	/* Unregister the interrupt handler of each vector */
	/* MBOX interrupt for VF(0...63) <-> PF */
	otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			    RVU_PF_INT_VEC_VFPF_MBOX0);

	/* MBOX interrupt for VF(64...127) <-> PF */
	otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			    RVU_PF_INT_VEC_VFPF_MBOX1);

	/* MBOX interrupt AF <-> PF */
	otx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,
			    RVU_PF_INT_VEC_AFPF_MBOX);
}

static void
mbox_unregister_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* Clear irq */
	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* Unregister the interrupt handler */
	otx2_unregister_irq(intr_handle, otx2_pf_vf_mbox_irq, dev,
			    RVU_VF_INT_VEC_MBOX);
}

static void
mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	if (otx2_dev_is_vf(dev))
		mbox_unregister_vf_irq(pci_dev, dev);
	else
		mbox_unregister_pf_irq(pci_dev, dev);
}

static void
otx2_update_pass_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	RTE_SET_USED(pci_dev);

	/* Update this logic when we have A1 */
	dev->hwcap |= OTX2_HWCAP_F_A0;
}

static void
otx2_update_vf_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	dev->hwcap = 0;

	switch (pci_dev->id.device_id) {
	case PCI_DEVID_OCTEONTX2_RVU_PF:
		break;
	case PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF:
	case PCI_DEVID_OCTEONTX2_RVU_NPA_VF:
	case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
	case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
	case PCI_DEVID_OCTEONTX2_RVU_VF:
		dev->hwcap |= OTX2_HWCAP_F_VF;
		break;
	}
}
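
/*
 * Typical usage from a PMD probe/remove pair (sketch; how "dev" is obtained
 * is up to the caller, it only needs to be a struct otx2_dev placed at the
 * start of the driver's private data):
 *
 *	rc = otx2_dev_init(pci_dev, dev);
 *	if (rc)
 *		return rc;
 *	...
 *	otx2_dev_fini(pci_dev, dev);
 */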

/**
 * Initialize the otx2 device
 */
int
otx2_dev_init(struct rte_pci_device *pci_dev, void *otx2_dev)
{
	int up_direction = MBOX_DIR_PFAF_UP;
	int rc, direction = MBOX_DIR_PFAF;
	struct otx2_dev *dev = otx2_dev;
	uintptr_t bar2, bar4;
	uint64_t bar4_addr;
	void *hwbase;

	bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
	bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;

	if (bar2 == 0 || bar4 == 0) {
		otx2_err("Failed to get pci bars");
		rc = -ENODEV;
		goto error;
	}

	dev->node = pci_dev->device.numa_node;
	dev->maxvf = pci_dev->max_vfs;
	dev->bar2 = bar2;
	dev->bar4 = bar4;

	otx2_update_vf_hwcap(pci_dev, dev);
	otx2_update_pass_hwcap(pci_dev, dev);

	if (otx2_dev_is_vf(dev)) {
		direction = MBOX_DIR_VFPF;
		up_direction = MBOX_DIR_VFPF_UP;
	}

	/* Initialize the local mbox */
	rc = otx2_mbox_init(&dev->mbox_local, bar4, bar2, direction, 1);
	if (rc)
		goto error;
	dev->mbox = &dev->mbox_local;

	rc = otx2_mbox_init(&dev->mbox_up, bar4, bar2, up_direction, 1);
	if (rc)
		goto error;

	/* Register mbox interrupts */
	rc = mbox_register_irq(pci_dev, dev);
	if (rc)
		goto mbox_fini;

	/* Check the readiness of PF/VF */
	rc = otx2_send_ready_msg(dev->mbox, &dev->pf_func);
	if (rc)
		goto mbox_unregister;

	dev->pf = otx2_get_pf(dev->pf_func);
	dev->vf = otx2_get_vf(dev->pf_func);
	memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));

	/* Found VF devices in a PF device */
	if (pci_dev->max_vfs > 0) {
		/* Remap mbox area for all vf's */
		bar4_addr = otx2_read64(bar2 + RVU_PF_VF_BAR4_ADDR);
		if (bar4_addr == 0) {
			rc = -ENODEV;
			goto mbox_unregister;
		}

		hwbase = mbox_mem_map(bar4_addr, MBOX_SIZE * pci_dev->max_vfs);
		if (hwbase == MAP_FAILED) {
			rc = -ENOMEM;
			goto mbox_unregister;
		}

		/* Init mbox object */
		rc = otx2_mbox_init(&dev->mbox_vfpf, (uintptr_t)hwbase,
				    bar2, MBOX_DIR_PFVF, pci_dev->max_vfs);
		if (rc)
			goto iounmap;

		/* PF -> VF UP messages */
		rc = otx2_mbox_init(&dev->mbox_vfpf_up, (uintptr_t)hwbase,
				    bar2, MBOX_DIR_PFVF_UP, pci_dev->max_vfs);
		if (rc)
			goto iounmap;
	}

	dev->mbox_active = 1;
	return rc;

iounmap:
	mbox_mem_unmap(hwbase, MBOX_SIZE * pci_dev->max_vfs);
mbox_unregister:
	mbox_unregister_irq(pci_dev, dev);
mbox_fini:
	otx2_mbox_fini(dev->mbox);
	otx2_mbox_fini(&dev->mbox_up);
error:
	return rc;
}

/**
 * Finalize the otx2 device
 */
void
otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct otx2_dev *dev = otx2_dev;
	struct otx2_idev_cfg *idev;
	struct otx2_mbox *mbox;

	/* Clear references to this pci dev */
	idev = otx2_intra_dev_get_cfg();
	if (idev->npa_lf && idev->npa_lf->pci_dev == pci_dev)
		idev->npa_lf = NULL;

	mbox_unregister_irq(pci_dev, dev);

	/* Release PF - VF */
	mbox = &dev->mbox_vfpf;
	if (mbox->hwbase && mbox->dev)
		mbox_mem_unmap((void *)mbox->hwbase,
			       MBOX_SIZE * pci_dev->max_vfs);
	otx2_mbox_fini(mbox);
	mbox = &dev->mbox_vfpf_up;
	otx2_mbox_fini(mbox);

	/* Release PF - AF */
	mbox = dev->mbox;
	otx2_mbox_fini(mbox);
	mbox = &dev->mbox_up;
	otx2_mbox_fini(mbox);
	dev->mbox_active = 0;

	/* Disable MSIX vectors */
	otx2_disable_irqs(intr_handle);
}