/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
#include <errno.h>
#include <stdint.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "otx2_mbox.h"
/* Doorbell register offsets for each mailbox direction.
 * A write to the trigger register raises an interrupt at the peer
 * (see otx2_mbox_msg_send). Offsets are relative to the BAR mapped
 * in mbox->reg_base.
 */

/* AF side: AF <-> PF channel */
#define RVU_AF_AFPF_MBOX0	(0x02000)
#define RVU_AF_AFPF_MBOX1	(0x02008)

/* PF side: PF <-> AF channel */
#define RVU_PF_PFAF_MBOX0	(0xC00)
#define RVU_PF_PFAF_MBOX1	(0xC08)

/* PF side: PF <-> VF channel (indexed per VF via tr_shift) */
#define RVU_PF_VFX_PFVF_MBOX0	(0x0000)
#define RVU_PF_VFX_PFVF_MBOX1	(0x0008)

/* VF side: VF <-> PF channel */
#define RVU_VF_VFPF_MBOX0	(0x0000)
#define RVU_VF_VFPF_MBOX1	(0x0008)
29 static inline uint16_t
32 return RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
36 otx2_mbox_fini(struct otx2_mbox *mbox)
45 otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
47 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
48 struct mbox_hdr *tx_hdr =
49 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
50 struct mbox_hdr *rx_hdr =
51 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
53 rte_spinlock_lock(&mdev->mbox_lock);
60 rte_spinlock_unlock(&mdev->mbox_lock);
64 otx2_mbox_init(struct otx2_mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
65 int direction, int ndevs, uint64_t intr_offset)
67 struct otx2_mbox_dev *mdev;
70 mbox->intr_offset = intr_offset;
71 mbox->reg_base = reg_base;
72 mbox->hwbase = hwbase;
77 mbox->tx_start = MBOX_DOWN_TX_START;
78 mbox->rx_start = MBOX_DOWN_RX_START;
79 mbox->tx_size = MBOX_DOWN_TX_SIZE;
80 mbox->rx_size = MBOX_DOWN_RX_SIZE;
84 mbox->tx_start = MBOX_DOWN_RX_START;
85 mbox->rx_start = MBOX_DOWN_TX_START;
86 mbox->tx_size = MBOX_DOWN_RX_SIZE;
87 mbox->rx_size = MBOX_DOWN_TX_SIZE;
89 case MBOX_DIR_AFPF_UP:
90 case MBOX_DIR_PFVF_UP:
91 mbox->tx_start = MBOX_UP_TX_START;
92 mbox->rx_start = MBOX_UP_RX_START;
93 mbox->tx_size = MBOX_UP_TX_SIZE;
94 mbox->rx_size = MBOX_UP_RX_SIZE;
96 case MBOX_DIR_PFAF_UP:
97 case MBOX_DIR_VFPF_UP:
98 mbox->tx_start = MBOX_UP_RX_START;
99 mbox->rx_start = MBOX_UP_TX_START;
100 mbox->tx_size = MBOX_UP_RX_SIZE;
101 mbox->rx_size = MBOX_UP_TX_SIZE;
109 case MBOX_DIR_AFPF_UP:
110 mbox->trigger = RVU_AF_AFPF_MBOX0;
114 case MBOX_DIR_PFAF_UP:
115 mbox->trigger = RVU_PF_PFAF_MBOX1;
119 case MBOX_DIR_PFVF_UP:
120 mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
124 case MBOX_DIR_VFPF_UP:
125 mbox->trigger = RVU_VF_VFPF_MBOX1;
132 mbox->dev = rte_zmalloc("mbox dev",
133 ndevs * sizeof(struct otx2_mbox_dev),
136 otx2_mbox_fini(mbox);
140 for (devid = 0; devid < ndevs; devid++) {
141 mdev = &mbox->dev[devid];
142 mdev->mbase = (void *)(mbox->hwbase + (devid * MBOX_SIZE));
143 rte_spinlock_init(&mdev->mbox_lock);
144 /* Init header to reset value */
145 otx2_mbox_reset(mbox, devid);
153 * Allocate a message response
156 otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size,
159 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
160 struct mbox_msghdr *msghdr = NULL;
162 rte_spinlock_lock(&mdev->mbox_lock);
163 size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
164 size_rsp = RTE_ALIGN(size_rsp, MBOX_MSG_ALIGN);
165 /* Check if there is space in mailbox */
166 if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset())
168 if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset())
170 if (mdev->msg_size == 0)
174 msghdr = (struct mbox_msghdr *)(((uintptr_t)mdev->mbase +
175 mbox->tx_start + msgs_offset() + mdev->msg_size));
177 /* Clear the whole msg region */
178 otx2_mbox_memset(msghdr, 0, sizeof(*msghdr) + size);
179 /* Init message header with reset values */
180 msghdr->ver = OTX2_MBOX_VERSION;
181 mdev->msg_size += size;
182 mdev->rsp_size += size_rsp;
183 msghdr->next_msgoff = mdev->msg_size + msgs_offset();
185 rte_spinlock_unlock(&mdev->mbox_lock);
192 * Send a mailbox message
195 otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
197 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
198 struct mbox_hdr *tx_hdr =
199 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
200 struct mbox_hdr *rx_hdr =
201 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
203 /* Reset header for next messages */
204 tx_hdr->msg_size = mdev->msg_size;
207 mdev->msgs_acked = 0;
209 /* num_msgs != 0 signals to the peer that the buffer has a number of
210 * messages. So this should be written after copying txmem
212 tx_hdr->num_msgs = mdev->num_msgs;
213 rx_hdr->num_msgs = 0;
215 /* Sync mbox data into memory */
218 /* The interrupt should be fired after num_msgs is written
219 * to the shared memory
221 rte_write64(1, (volatile void *)(mbox->reg_base +
222 (mbox->trigger | (devid << mbox->tr_shift))));
227 * Wait and get mailbox response
230 otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg)
232 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
233 struct mbox_msghdr *msghdr;
237 rc = otx2_mbox_wait_for_rsp(mbox, devid);
243 offset = mbox->rx_start +
244 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
245 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
253 * Polling for given wait time to get mailbox response
256 mbox_poll(struct otx2_mbox *mbox, uint32_t wait)
258 uint32_t timeout = 0, sleep = 1;
259 uint32_t wait_us = wait * 1000;
260 uint64_t rsp_reg = 0;
263 reg_addr = mbox->reg_base + mbox->intr_offset;
265 rsp_reg = otx2_read64(reg_addr);
267 if (timeout >= wait_us)
276 /* Clear interrupt */
277 otx2_write64(rsp_reg, reg_addr);
280 otx2_mbox_reset(mbox, 0);
287 * Wait and get mailbox response with timeout
290 otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
293 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
294 struct mbox_msghdr *msghdr;
298 rc = otx2_mbox_wait_for_rsp_tmo(mbox, devid, tmo);
304 offset = mbox->rx_start +
305 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
306 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
314 mbox_wait(struct otx2_mbox *mbox, int devid, uint32_t rst_timo)
316 volatile struct otx2_mbox_dev *mdev = &mbox->dev[devid];
317 uint32_t timeout = 0, sleep = 1;
319 rst_timo = rst_timo * 1000; /* Milli seconds to micro seconds */
320 while (mdev->num_msgs > mdev->msgs_acked) {
323 if (timeout >= rst_timo) {
324 struct mbox_hdr *tx_hdr =
325 (struct mbox_hdr *)((uintptr_t)mdev->mbase +
327 struct mbox_hdr *rx_hdr =
328 (struct mbox_hdr *)((uintptr_t)mdev->mbase +
331 otx2_err("MBOX[devid: %d] message wait timeout %d, "
332 "num_msgs: %d, msgs_acked: %d "
333 "(tx/rx num_msgs: %d/%d), msg_size: %d, "
335 devid, timeout, mdev->num_msgs,
336 mdev->msgs_acked, tx_hdr->num_msgs,
337 rx_hdr->num_msgs, mdev->msg_size,
348 otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo)
350 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
353 /* Sync with mbox region */
356 if (mbox->trigger == RVU_PF_VFX_PFVF_MBOX1 ||
357 mbox->trigger == RVU_PF_VFX_PFVF_MBOX0) {
358 /* In case of VF, Wait a bit more to account round trip delay */
363 if (rte_thread_is_intr())
364 rc = mbox_poll(mbox, tmo);
366 rc = mbox_wait(mbox, devid, tmo);
376 * Wait for the mailbox response
379 otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
381 return otx2_mbox_wait_for_rsp_tmo(mbox, devid, MBOX_RSP_TIMEOUT);
385 otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid)
387 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
390 rte_spinlock_lock(&mdev->mbox_lock);
391 avail = mbox->tx_size - mdev->msg_size - msgs_offset();
392 rte_spinlock_unlock(&mdev->mbox_lock);
398 otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pcifunc)
400 struct ready_msg_rsp *rsp;
403 otx2_mbox_alloc_msg_ready(mbox);
405 otx2_mbox_msg_send(mbox, 0);
406 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
410 if (rsp->hdr.ver != OTX2_MBOX_VERSION) {
411 otx2_err("Incompatible MBox versions(AF: 0x%04x DPDK: 0x%04x)",
412 rsp->hdr.ver, OTX2_MBOX_VERSION);
417 *pcifunc = rsp->hdr.pcifunc;
423 otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pcifunc,
428 rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
432 rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
433 rsp->hdr.rc = MBOX_MSG_INVALID;
434 rsp->hdr.pcifunc = pcifunc;
441 * Convert mail box ID to name
443 const char *otx2_mbox_id2name(uint16_t id)
446 #define M(_name, _id, _1, _2, _3) case _id: return # _name;
455 int otx2_mbox_id2size(uint16_t id)
458 #define M(_1, _id, _2, _req_type, _3) case _id: return sizeof(struct _req_type);