struct mbox *mbox = dev->mbox;
struct mbox_dev *mdev = &mbox->dev[0];
- volatile uint64_t int_status;
+ volatile uint64_t int_status = 0;
struct mbox_msghdr *msghdr;
uint64_t off;
int rc = 0;
/* Reserve PF/VF mbox message */
size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
+ if (!rsp) {
+ plt_err("Failed to reserve VF%d message", vf);
+ continue;
+ }
+
mbox_rsp_init(msg->id, rsp);
/* Copy message from AF<->PF mbox to PF<->VF mbox */
BIT_ULL(vf % max_bits);
rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
mbox, vf, sizeof(*rsp));
+ if (!rsp) {
+ plt_err("Failed to alloc VF%d READY message",
+ vf);
+ continue;
+ }
+
mbox_rsp_init(msg->id, rsp);
/* PF/VF function ID */
static int
mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
int i, rc;
/* HW clear irq */
static int
mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
int rc;
/* Clear irq */
static void
mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
int i;
/* HW clear irq */
static void
mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
/* Clear irq */
plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
static int
vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
int i;
plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);
static int
vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *handle = pci_dev->intr_handle;
int i, rc;
plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);
return 0;
}
+/* Clear any RVUM interrupt lines left pending from a previous run so the
+ * device starts from a clean interrupt state before handlers are registered.
+ * The pattern (read pending bits, write the same value back) suggests these
+ * registers are write-1-to-clear — NOTE(review): confirm against the RVU HW
+ * reference manual. Assumes dev->bar2 is already mapped by the caller.
+ */
+static void
+clear_rvum_interrupts(struct dev *dev)
+{
+	uint64_t intr;
+	int i;
+
+	if (dev_is_vf(dev)) {
+		/* Clear VF mbox interrupt */
+		intr = plt_read64(dev->bar2 + RVU_VF_INT);
+		if (intr)
+			plt_write64(intr, dev->bar2 + RVU_VF_INT);
+	} else {
+		/* Clear AF PF interrupt line */
+		intr = plt_read64(dev->bar2 + RVU_PF_INT);
+		if (intr)
+			plt_write64(intr, dev->bar2 + RVU_PF_INT);
+		/* PF<->VF mbox and FLR pending bits are spread across
+		 * MAX_VFPF_DWORD_BITS 64-bit registers (one bit per VF);
+		 * acknowledge each register that has anything pending.
+		 */
+		for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
+			/* Clear MBOX interrupts */
+			intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(i));
+			if (intr)
+				plt_write64(intr,
+					    dev->bar2 +
+						    RVU_PF_VFPF_MBOX_INTX(i));
+			/* Clear VF FLR interrupts */
+			intr = plt_read64(dev->bar2 + RVU_PF_VFFLR_INTX(i));
+			if (intr)
+				plt_write64(intr,
+					    dev->bar2 + RVU_PF_VFFLR_INTX(i));
+		}
+	}
+}
+
int
dev_active_vfs(struct dev *dev)
{
struct lmtst_tbl_setup_req *req;
req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
+ if (!req)
+ return -ENOSPC;
+
/* This pcifunc is defined with primary pcifunc whose LMT address
* will be shared. If call contains valid IOVA, following pcifunc
* field is of no use.
*/
if (!dev->disable_shared_lmt) {
idev = idev_get_cfg();
+ if (!idev) {
+ errno = EFAULT;
+ goto free;
+ }
+
if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
idev->lmt_base_addr = dev->lmt_base;
idev->lmt_pf_func = dev->pf_func;
intr_offset = RVU_PF_INT;
}
+ /* Clear all RVUM interrupts */
+ clear_rvum_interrupts(dev);
+
/* Initialize the local mbox */
rc = mbox_init(&dev->mbox_local, mbox, bar2, direction, 1, intr_offset);
if (rc)
int
dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
struct mbox *mbox;
/* Check if this dev hosts npalf and has 1+ refs */