/* Reserve PF/VF mbox message */
size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
+ if (!rsp) {
+ plt_err("Failed to reserve VF%d message", vf);
+ continue;
+ }
+
mbox_rsp_init(msg->id, rsp);
/* Copy message from AF<->PF mbox to PF<->VF mbox */
rsp->rc = msg->rc;
rsp->pcifunc = msg->pcifunc;
+	/* Whenever a PF comes up, the AF sends the link status to it, but
+	 * when a VF comes up no such event is sent to that VF. So use the
+	 * MBOX_MSG_NIX_LF_START_RX response from the AF as the trigger to
+	 * forward the PF's link status to the VF.
+	 */
+ if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
+ /* Send link status to VF */
+ struct cgx_link_user_info linfo;
+ struct mbox_msghdr *vf_msg;
+ size_t sz;
+
+ /* Get the link status */
+ memset(&linfo, 0, sizeof(struct cgx_link_user_info));
+ if (dev->ops && dev->ops->link_status_get)
+ dev->ops->link_status_get(dev->roc_nix, &linfo);
+
+ sz = PLT_ALIGN(mbox_id2size(MBOX_MSG_CGX_LINK_EVENT),
+ MBOX_MSG_ALIGN);
+ /* Prepare the message to be sent */
+ vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
+ if (vf_msg) {
+ mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
+ memcpy((uint8_t *)vf_msg +
+ sizeof(struct mbox_msghdr), &linfo,
+ sizeof(struct cgx_link_user_info));
+
+ vf_msg->rc = msg->rc;
+ vf_msg->pcifunc = msg->pcifunc;
+ /* Send to VF */
+ mbox_msg_send(&dev->mbox_vfpf_up, vf);
+ }
+ }
+
offset = mbox->rx_start + msg->next_msgoff;
}
plt_spinlock_unlock(&mdev->mbox_lock);
BIT_ULL(vf % max_bits);
rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
mbox, vf, sizeof(*rsp));
+ if (!rsp) {
+ plt_err("Failed to alloc VF%d READY message",
+ vf);
+ continue;
+ }
+
mbox_rsp_init(msg->id, rsp);
/* PF/VF function ID */
static int
mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
int i, rc;
/* HW clear irq */
static int
mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
int rc;
/* Clear irq */
static void
mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
int i;
/* HW clear irq */
static void
mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
/* Clear irq */
plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
static int
vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
int i;
plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);
static int
vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
- struct plt_intr_handle *handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *handle = pci_dev->intr_handle;
int i, rc;
plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);
return 0;
}
+/* Clear any RVUM interrupts left pending from a previous driver
+ * instance (mbox and VF FLR lines) so that stale events are not
+ * serviced once the IRQ handlers are registered.  The pending bits
+ * read from each INT register are written back to acknowledge them
+ * (registers appear to be write-1-to-clear — consistent with the
+ * "HW clear irq" handling elsewhere in this file).
+ */
+static void
+clear_rvum_interrupts(struct dev *dev)
+{
+	uint64_t intr;
+	int i;
+
+	if (dev_is_vf(dev)) {
+		/* Clear VF mbox interrupt */
+		intr = plt_read64(dev->bar2 + RVU_VF_INT);
+		if (intr)
+			plt_write64(intr, dev->bar2 + RVU_VF_INT);
+	} else {
+		/* Clear AF PF interrupt line */
+		intr = plt_read64(dev->bar2 + RVU_PF_INT);
+		if (intr)
+			plt_write64(intr, dev->bar2 + RVU_PF_INT);
+		/* A PF owns one pending-bit word per VF group; sweep all
+		 * MAX_VFPF_DWORD_BITS words for both interrupt sources.
+		 */
+		for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
+			/* Clear MBOX interrupts */
+			intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(i));
+			if (intr)
+				plt_write64(intr,
+					    dev->bar2 +
+					    RVU_PF_VFPF_MBOX_INTX(i));
+			/* Clear VF FLR interrupts */
+			intr = plt_read64(dev->bar2 + RVU_PF_VFFLR_INTX(i));
+			if (intr)
+				plt_write64(intr,
+					    dev->bar2 + RVU_PF_VFFLR_INTX(i));
+		}
+	}
+}
+
int
dev_active_vfs(struct dev *dev)
{
break;
case PCI_DEVID_CNXK_RVU_SSO_TIM_VF:
case PCI_DEVID_CNXK_RVU_NPA_VF:
+ case PCI_DEVID_CN10K_RVU_CPT_VF:
+ case PCI_DEVID_CN9K_RVU_CPT_VF:
case PCI_DEVID_CNXK_RVU_AF_VF:
case PCI_DEVID_CNXK_RVU_VF:
case PCI_DEVID_CNXK_RVU_SDP_VF:
struct lmtst_tbl_setup_req *req;
req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
+ if (!req)
+ return -ENOSPC;
+
/* This pcifunc is defined with primary pcifunc whose LMT address
* will be shared. If call contains valid IOVA, following pcifunc
* field is of no use.
*/
if (!dev->disable_shared_lmt) {
idev = idev_get_cfg();
+ if (!idev) {
+ errno = EFAULT;
+ goto free;
+ }
+
if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
idev->lmt_base_addr = dev->lmt_base;
idev->lmt_pf_func = dev->pf_func;
intr_offset = RVU_PF_INT;
}
+ /* Clear all RVUM interrupts */
+ clear_rvum_interrupts(dev);
+
/* Initialize the local mbox */
rc = mbox_init(&dev->mbox_local, mbox, bar2, direction, 1, intr_offset);
if (rc)
int
dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
{
- struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
struct mbox *mbox;
/* Check if this dev hosts npalf and has 1+ refs */