#define RVU_VF_VFPF_MBOX0 (0x0000)
#define RVU_VF_VFPF_MBOX1 (0x0008)
+static inline uint16_t
+msgs_offset(void)
+{
+ return RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+}
+
/**
 * @internal
 * Tear down a mailbox instance.
 *
 * Fix: the previous body executed `return 0;` inside a function declared
 * `void`, which is an ISO C constraint violation — drop the bogus value.
 * No resources are released here; ownership of the mapped regions stays
 * with the caller of otx2_mbox_init().
 */
void
otx2_mbox_fini(struct otx2_mbox *mbox)
{
	RTE_SET_USED(mbox);
}
+
/**
 * @internal
 * Allocate a message response
 *
 * Reserves @p size bytes for a request in the TX region of @p devid's
 * mailbox and accounts @p size_rsp bytes for the peer's reply in the RX
 * region. Both sizes are rounded up to MBOX_MSG_ALIGN. Returns a pointer
 * to the zeroed message header inside the shared TX area, or NULL when
 * either region would overflow. Serialized by mdev->mbox_lock.
 */
struct mbox_msghdr *
otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size,
			int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	rte_spinlock_lock(&mdev->mbox_lock);
	size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = RTE_ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset())
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset())
		goto exit;
	/* First message of a fresh batch: restart the message counter */
	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	/* Payload location = base + region start + aligned header + used */
	msghdr = (struct mbox_msghdr *)(((uintptr_t)mdev->mbase +
			mbox->tx_start + msgs_offset() + mdev->msg_size));

	/* Clear the whole msg region */
	otx2_mbox_memset(msghdr, 0, sizeof(*msghdr) + size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	/* Offset of the next message, relative to the region start */
	msghdr->next_msgoff = mdev->msg_size + msgs_offset();
exit:
	rte_spinlock_unlock(&mdev->mbox_lock);

	return msghdr;
}
+
/**
 * @internal
 * Send a mailbox message
 *
 * Publishes the batch of messages composed in @p devid's TX region and
 * rings the peer's doorbell register. The write ordering here matters:
 * payload/header stores must be globally visible before num_msgs, and
 * num_msgs before the doorbell write.
 */
void
otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
	struct mbox_hdr *rx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);

	/* Reset header for next messages */
	tx_hdr->msg_size = mdev->msg_size;
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages. So this should be written after copying txmem
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;

	/* Sync mbox data into memory */
	rte_wmb();

	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	rte_write64(1, (volatile void *)(mbox->reg_base +
		(mbox->trigger | (devid << mbox->tr_shift))));
}
+
+/**
+ * @internal
+ * Wait and get mailbox response
+ */
+int
+otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr;
+ uint64_t offset;
+ int rc;
+
+ rc = otx2_mbox_wait_for_rsp(mbox, devid);
+ if (rc != 1)
+ return -EIO;
+
+ rte_rmb();
+
+ offset = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ if (msg != NULL)
+ *msg = msghdr;
+
+ return msghdr->rc;
+}
+
+/**
+ * @internal
+ * Wait and get mailbox response with timeout
+ */
+int
+otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
+ uint32_t tmo)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr;
+ uint64_t offset;
+ int rc;
+
+ rc = otx2_mbox_wait_for_rsp_tmo(mbox, devid, tmo);
+ if (rc != 1)
+ return -EIO;
+
+ rte_rmb();
+
+ offset = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ if (msg != NULL)
+ *msg = msghdr;
+
+ return msghdr->rc;
+}
+
/* Poll until the peer has acknowledged every pending message on @p devid,
 * sleeping 1 ms per iteration. Returns 0 on success, -EIO once the total
 * wait reaches rst_timo milliseconds (mailbox state is logged to aid
 * diagnosis). mdev is accessed through a volatile pointer because
 * msgs_acked is advanced asynchronously by the interrupt/response path.
 */
static int
mbox_wait(struct otx2_mbox *mbox, int devid, uint32_t rst_timo)
{
	volatile struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	uint32_t timeout = 0, sleep = 1;

	while (mdev->num_msgs > mdev->msgs_acked) {
		rte_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= rst_timo) {
			struct mbox_hdr *tx_hdr =
				(struct mbox_hdr *)((uintptr_t)mdev->mbase +
							mbox->tx_start);
			struct mbox_hdr *rx_hdr =
				(struct mbox_hdr *)((uintptr_t)mdev->mbase +
							mbox->rx_start);

			otx2_err("MBOX[devid: %d] message wait timeout %d, "
				 "num_msgs: %d, msgs_acked: %d "
				 "(tx/rx num_msgs: %d/%d), msg_size: %d, "
				 "rsp_size: %d",
				 devid, timeout, mdev->num_msgs,
				 mdev->msgs_acked, tx_hdr->num_msgs,
				 rx_hdr->num_msgs, mdev->msg_size,
				 mdev->rsp_size);

			return -EIO;
		}
		/* Re-read counters the peer may have updated */
		rte_rmb();
	}
	return 0;
}
+
+int
+otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int rc = 0;
+
+ /* Sync with mbox region */
+ rte_rmb();
+
+ if (mbox->trigger == RVU_PF_VFX_PFVF_MBOX1 ||
+ mbox->trigger == RVU_PF_VFX_PFVF_MBOX0) {
+ /* In case of VF, Wait a bit more to account round trip delay */
+ tmo = tmo * 2;
+ }
+
+ /* Wait message */
+ rc = mbox_wait(mbox, devid, tmo);
+ if (rc)
+ return rc;
+
+ return mdev->msgs_acked;
+}
+
/**
 * @internal
 * Wait for the mailbox response
 *
 * Convenience wrapper around otx2_mbox_wait_for_rsp_tmo() using the
 * default MBOX_RSP_TIMEOUT. Returns the number of acked messages on
 * success, a negative errno on timeout.
 */
int
otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
	return otx2_mbox_wait_for_rsp_tmo(mbox, devid, MBOX_RSP_TIMEOUT);
}
+
+int
+otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int avail;
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+ avail = mbox->tx_size - mdev->msg_size - msgs_offset();
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return avail;
+}
+
+int
+otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pcifunc)
+{
+ struct ready_msg_rsp *rsp;
+ int rc;
+
+ otx2_mbox_alloc_msg_ready(mbox);
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (pcifunc)
+ *pcifunc = rsp->hdr.pcifunc;
+
+ return 0;
+}
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pcifunc,
+ uint16_t id)
+{
+ struct msg_rsp *rsp;
+
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+ rsp->hdr.id = id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.rc = MBOX_MSG_INVALID;
+ rsp->hdr.pcifunc = pcifunc;
+
+ return 0;
+}
+
/**
 * @internal
 * Convert mail box ID to name
 *
 * Expands the MBOX_MESSAGES and MBOX_UP_CGX_MESSAGES X-macro tables into
 * one case per message id, returning the stringified message name, or
 * "INVALID ID" for an id not present in either table.
 */
const char *otx2_mbox_id2name(uint16_t id)
{
	switch (id) {
	/* Each table entry (name, id, ...) becomes: case id: return "name"; */
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
	MBOX_UP_CGX_MESSAGES
#undef M
	default :
		return "INVALID ID";
	}
}
+
/* Return the request-structure size for mailbox message @p id, using the
 * same X-macro tables as otx2_mbox_id2name(); 0 for an unknown id.
 */
int otx2_mbox_id2size(uint16_t id)
{
	switch (id) {
	/* Each entry (.., id, .., req_type, ..) maps id -> sizeof(req_type) */
#define M(_1, _id, _2, _req_type, _3) case _id: return sizeof(struct _req_type);
	MBOX_MESSAGES
	MBOX_UP_CGX_MESSAGES
#undef M
	default :
		return 0;
	}
}
uint32_t __otx2_io currentbucket;
};
+const char *otx2_mbox_id2name(uint16_t id);
+int otx2_mbox_id2size(uint16_t id);
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
int otx2_mbox_init(struct otx2_mbox *mbox, uintptr_t hwbase,
uintptr_t reg_base, int direction, int ndevs);
void otx2_mbox_fini(struct otx2_mbox *mbox);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo);
+int otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg);
+int otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
+ uint32_t tmo);
+int otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp);
+
/* Allocate a request message on @p devid with no response space accounted
 * (thin wrapper over otx2_mbox_alloc_msg_rsp() with size_rsp == 0).
 */
static inline struct mbox_msghdr *
otx2_mbox_alloc_msg(struct otx2_mbox *mbox, int devid, int size)
{
	struct mbox_msghdr *hdr;

	hdr = otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
	return hdr;
}
+
+static inline void
+otx2_mbox_req_init(uint16_t mbox_id, void *msghdr)
+{
+ struct mbox_msghdr *hdr = msghdr;
+
+ hdr->sig = OTX2_MBOX_REQ_SIG;
+ hdr->ver = OTX2_MBOX_VERSION;
+ hdr->id = mbox_id;
+ hdr->pcifunc = 0;
+}
+
+static inline void
+otx2_mbox_rsp_init(uint16_t mbox_id, void *msghdr)
+{
+ struct mbox_msghdr *hdr = msghdr;
+
+ hdr->sig = OTX2_MBOX_RSP_SIG;
+ hdr->rc = -ETIMEDOUT;
+ hdr->id = mbox_id;
+}
+
+static inline bool
+otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ bool ret;
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+ ret = mdev->num_msgs != 0;
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return ret;
+}
+
+static inline int
+otx2_mbox_process(struct otx2_mbox *mbox)
+{
+ otx2_mbox_msg_send(mbox, 0);
+ return otx2_mbox_get_rsp(mbox, 0, NULL);
+}
+
/* Fire the queued request on device 0 and block for its reply, handing
 * the response header back through @p msg.
 */
static inline int
otx2_mbox_process_msg(struct otx2_mbox *mbox, void **msg)
{
	const int devid = 0;

	otx2_mbox_msg_send(mbox, devid);
	return otx2_mbox_get_rsp(mbox, devid, msg);
}
+
+static inline int
+otx2_mbox_process_tmo(struct otx2_mbox *mbox, uint32_t tmo)
+{
+ otx2_mbox_msg_send(mbox, 0);
+ return otx2_mbox_get_rsp_tmo(mbox, 0, NULL, tmo);
+}
+
/* Fire the queued request on device 0 and block up to @p tmo ms for its
 * reply, handing the response header back through @p msg.
 */
static inline int
otx2_mbox_process_msg_tmo(struct otx2_mbox *mbox, void **msg, uint32_t tmo)
{
	const int devid = 0;

	otx2_mbox_msg_send(mbox, devid);
	return otx2_mbox_get_rsp_tmo(mbox, devid, msg, tmo);
}
+
+int otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pf_func /* out */);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pf_func,
+ uint16_t id);
+
/* X-macro generator: for every (name, id, fn_name, req_type, rsp_type)
 * entry of the MBOX_MESSAGES table, expand an inline
 * otx2_mbox_alloc_msg_<fn_name>() helper that reserves space for the
 * request and its response on device 0 and seeds the request header with
 * the request signature and message id. Returns NULL when the mailbox
 * region is full.
 */
#define M(_name, _id, _fn_name, _req_type, _rsp_type)		\
static inline struct _req_type					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct otx2_mbox *mbox)	\
{								\
	struct _req_type *req;					\
								\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(	\
		mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));			\
	if (!req)						\
		return NULL;					\
								\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;			\
	req->hdr.id = _id;					\
	otx2_mbox_dbg("id=0x%x (%s)",				\
		req->hdr.id, otx2_mbox_id2name(req->hdr.id));	\
	return req;						\
}

/* Instantiate one allocation helper per mailbox message */
MBOX_MESSAGES
#undef M
+
/* Byte-granular copy for device memory, where libc memcpy's wide or
 * unaligned-to-16B accesses do not work. Copies @p l bytes from @p s to
 * @p d one byte at a time through volatile pointers.
 * Returns @p d, or NULL if either pointer is NULL.
 */
static inline volatile void *
otx2_mbox_memcpy(volatile void *d, const volatile void *s, size_t l)
{
	const volatile uint8_t *src;
	volatile uint8_t *dst;
	size_t n;

	if (d == NULL || s == NULL)
		return NULL;

	dst = (volatile uint8_t *)d;
	src = (const volatile uint8_t *)s;
	for (n = 0; n < l; n++)
		dst[n] = src[n];

	return d;
}
+
/* Byte-granular memset for device memory, where libc memset's wide or
 * unaligned-to-16B accesses do not work. Writes @p val into the first
 * @p l bytes at @p d; a NULL pointer or zero length is a no-op.
 */
static inline void
otx2_mbox_memset(volatile void *d, uint8_t val, size_t l)
{
	volatile uint8_t *p;
	size_t n;

	if (d == NULL || l == 0)
		return;

	p = (volatile uint8_t *)d;
	for (n = 0; n < l; n++)
		p[n] = val;
}
#endif /* __OTX2_MBOX_H__ */