1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Mailbox doorbell (trigger) register offsets, one pair per link
 * direction; mbox_init() selects one of these as mbox->trigger based
 * on the mbox direction.
 */
13 #define RVU_AF_AFPF_MBOX0 (0x02000)
14 #define RVU_AF_AFPF_MBOX1 (0x02008)
16 #define RVU_PF_PFAF_MBOX0 (0xC00)
17 #define RVU_PF_PFAF_MBOX1 (0xC08)
19 #define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
20 #define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
22 #define RVU_VF_VFPF_MBOX0 (0x0000)
23 #define RVU_VF_VFPF_MBOX1 (0x0008)
25 /* RCLK, SCLK in MHz */
/* Cached from the AF's ready-message response in send_ready_msg();
 * written only while still zero (first responder wins).
 */
26 uint16_t dev_rclk_freq;
27 uint16_t dev_sclk_freq;
/* Size of the per-queue mailbox header rounded up to MBOX_MSG_ALIGN;
 * the first message in a TX/RX area starts at this offset.
 * NOTE(review): the function-name line is not visible in this view —
 * presumably this is msgs_offset(), which the rest of the file calls.
 */
29 static inline uint16_t
32 return PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
/* Tear down a struct mbox previously set up by mbox_init().
 * NOTE(review): the function body is not visible in this view.
 */
36 mbox_fini(struct mbox *mbox)
/* Reset the TX and RX mailbox headers of one device slot to their
 * initial (empty) state, under the per-device spinlock.
 * NOTE(review): the statements that actually clear tx_hdr/rx_hdr and
 * the mdev counters fall between the lock/unlock lines but are not
 * visible in this view.
 */
45 mbox_reset(struct mbox *mbox, int devid)
47 struct mbox_dev *mdev = &mbox->dev[devid];
48 struct mbox_hdr *tx_hdr =
49 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
50 struct mbox_hdr *rx_hdr =
51 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
53 plt_spinlock_lock(&mdev->mbox_lock);
60 plt_spinlock_unlock(&mdev->mbox_lock);
/* Initialize a struct mbox for a given direction: record BAR/register
 * bases, pick the TX/RX window offsets and sizes for the direction,
 * select the doorbell trigger register, allocate one mbox_dev per
 * remote device and reset each slot, then resolve the response
 * timeout.  (Several lines, including the switch statements' opening
 * lines and some case labels, are elided in this view.)
 */
64 mbox_init(struct mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
65 int direction, int ndevs, uint64_t intr_offset)
67 struct mbox_dev *mdev;
71 mbox->intr_offset = intr_offset;
72 mbox->reg_base = reg_base;
73 mbox->hwbase = hwbase;
/* Down-direction initiator layout (case labels not visible here —
 * presumably MBOX_DIR_AFPF / MBOX_DIR_PFVF; TODO confirm).
 */
78 mbox->tx_start = MBOX_DOWN_TX_START;
79 mbox->rx_start = MBOX_DOWN_RX_START;
80 mbox->tx_size = MBOX_DOWN_TX_SIZE;
81 mbox->rx_size = MBOX_DOWN_RX_SIZE;
/* Down-direction responder: mirror image of the above, TX/RX swapped */
85 mbox->tx_start = MBOX_DOWN_RX_START;
86 mbox->rx_start = MBOX_DOWN_TX_START;
87 mbox->tx_size = MBOX_DOWN_RX_SIZE;
88 mbox->rx_size = MBOX_DOWN_TX_SIZE;
/* Up-direction initiator layout */
90 case MBOX_DIR_AFPF_UP:
91 case MBOX_DIR_PFVF_UP:
92 mbox->tx_start = MBOX_UP_TX_START;
93 mbox->rx_start = MBOX_UP_RX_START;
94 mbox->tx_size = MBOX_UP_TX_SIZE;
95 mbox->rx_size = MBOX_UP_RX_SIZE;
/* Up-direction responder: TX/RX swapped relative to the initiator */
97 case MBOX_DIR_PFAF_UP:
98 case MBOX_DIR_VFPF_UP:
99 mbox->tx_start = MBOX_UP_RX_START;
100 mbox->rx_start = MBOX_UP_TX_START;
101 mbox->tx_size = MBOX_UP_RX_SIZE;
102 mbox->rx_size = MBOX_UP_TX_SIZE;
/* Second switch: doorbell trigger register per direction (most case
 * bodies, including tr_shift assignments, are elided in this view).
 */
110 case MBOX_DIR_AFPF_UP:
111 mbox->trigger = RVU_AF_AFPF_MBOX0;
115 case MBOX_DIR_PFAF_UP:
116 mbox->trigger = RVU_PF_PFAF_MBOX1;
120 case MBOX_DIR_PFVF_UP:
121 mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
125 case MBOX_DIR_VFPF_UP:
126 mbox->trigger = RVU_VF_VFPF_MBOX1;
/* One mbox_dev per remote device sharing this mailbox region */
133 mbox->dev = plt_zmalloc(ndevs * sizeof(struct mbox_dev), ROC_ALIGN);
139 for (devid = 0; devid < ndevs; devid++) {
140 mdev = &mbox->dev[devid];
141 mdev->mbase = (void *)(mbox->hwbase + (devid * MBOX_SIZE));
142 plt_spinlock_init(&mdev->mbox_lock);
143 /* Init header to reset value */
144 mbox_reset(mbox, devid);
/* Response timeout: ROC_CN10K_MBOX_TIMEOUT presumably takes precedence
 * over ROC_MBOX_TIMEOUT, with MBOX_RSP_TIMEOUT as the default — the
 * selecting if/else conditions are not visible here; TODO confirm.
 */
147 var = getenv("ROC_CN10K_MBOX_TIMEOUT");
148 var_to = getenv("ROC_MBOX_TIMEOUT");
151 mbox->rsp_tmo = atoi(var);
153 mbox->rsp_tmo = atoi(var_to);
155 mbox->rsp_tmo = MBOX_RSP_TIMEOUT;
162 * Allocate a message response
/* Reserve `size` bytes for a request in devid's TX queue and account
 * `size_rsp` bytes of expected response space in the RX queue; both
 * sizes are rounded up to MBOX_MSG_ALIGN.  Returns the zeroed message
 * header on success, NULL when either queue lacks room (the early-exit
 * bodies of the space checks are elided in this view).  Runs under the
 * per-device spinlock.
 */
165 mbox_alloc_msg_rsp(struct mbox *mbox, int devid, int size, int size_rsp)
167 struct mbox_dev *mdev = &mbox->dev[devid];
168 struct mbox_msghdr *msghdr = NULL;
170 plt_spinlock_lock(&mdev->mbox_lock);
171 size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
172 size_rsp = PLT_ALIGN(size_rsp, MBOX_MSG_ALIGN);
173 /* Check if there is space in mailbox */
174 if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset())
176 if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset())
178 if (mdev->msg_size == 0)
/* New message lands at the current end of the TX queue (the final
 * term of this address computation is elided in this view).
 */
182 msghdr = (struct mbox_msghdr *)(((uintptr_t)mdev->mbase +
183 mbox->tx_start + msgs_offset() +
186 /* Clear the whole msg region */
187 mbox_memset(msghdr, 0, sizeof(*msghdr) + size);
188 /* Init message header with reset values */
189 msghdr->ver = MBOX_VERSION;
190 mdev->msg_size += size;
191 mdev->rsp_size += size_rsp;
192 msghdr->next_msgoff = mdev->msg_size + msgs_offset();
194 plt_spinlock_unlock(&mdev->mbox_lock);
201 * Send a mailbox message
/* Publish devid's queued messages to the peer: fill in the TX header,
 * clear the ack counter, then write num_msgs last so the peer never
 * observes a partially written buffer, and finally ring the doorbell
 * via the trigger register.  (The memory barrier between the header
 * write and the doorbell is elided in this view.)
 */
204 mbox_msg_send(struct mbox *mbox, int devid)
206 struct mbox_dev *mdev = &mbox->dev[devid];
207 struct mbox_hdr *tx_hdr =
208 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
209 struct mbox_hdr *rx_hdr =
210 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
212 /* Reset header for next messages */
213 tx_hdr->msg_size = mdev->msg_size;
216 mdev->msgs_acked = 0;
218 /* num_msgs != 0 signals to the peer that the buffer has a number of
219 * messages. So this should be written after copying txmem
221 tx_hdr->num_msgs = mdev->num_msgs;
222 rx_hdr->num_msgs = 0;
224 /* Sync mbox data into memory */
227 /* The interrupt should be fired after num_msgs is written
228 * to the shared memory
230 plt_write64(1, (volatile void *)(mbox->reg_base +
232 (devid << mbox->tr_shift))));
237 * Wait and get mailbox response
/* Wait for the peer's response using the default timeout
 * (mbox_wait_for_rsp), then point *msg at the first response message
 * in the RX area.  Error-propagation lines are elided in this view.
 */
240 mbox_get_rsp(struct mbox *mbox, int devid, void **msg)
242 struct mbox_dev *mdev = &mbox->dev[devid];
243 struct mbox_msghdr *msghdr;
247 rc = mbox_wait_for_rsp(mbox, devid);
/* First message starts right after the aligned mbox_hdr */
253 offset = mbox->rx_start +
254 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
255 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
263 * Polling for given wait time to get mailbox response
/* Poll the mailbox interrupt status register for up to `wait`
 * milliseconds (converted to microseconds); on success acknowledge the
 * interrupt by writing the read value back.  The polling loop's sleep
 * and timeout-accumulation statements are elided in this view.
 */
266 mbox_poll(struct mbox *mbox, uint32_t wait)
268 uint32_t timeout = 0, sleep = 1;
269 uint32_t wait_us = wait * 1000;
270 uint64_t rsp_reg = 0;
273 reg_addr = mbox->reg_base + mbox->intr_offset;
275 rsp_reg = plt_read64(reg_addr);
277 if (timeout >= wait_us)
286 /* Clear interrupt */
287 plt_write64(rsp_reg, reg_addr);
297 * Wait and get mailbox response with timeout
/* Same as mbox_get_rsp() but with a caller-supplied timeout `tmo`
 * instead of mbox->rsp_tmo.  Error-propagation lines are elided in
 * this view.
 */
300 mbox_get_rsp_tmo(struct mbox *mbox, int devid, void **msg, uint32_t tmo)
302 struct mbox_dev *mdev = &mbox->dev[devid];
303 struct mbox_msghdr *msghdr;
307 rc = mbox_wait_for_rsp_tmo(mbox, devid, tmo);
/* First message starts right after the aligned mbox_hdr */
313 offset = mbox->rx_start +
314 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
315 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
/* Spin until the peer acks every queued message (msgs_acked catches up
 * to num_msgs) or `rst_timo` milliseconds elapse; on timeout log a
 * diagnostic including both sides' message counters.  mdev is volatile
 * because the peer updates it through shared memory.  The per-iteration
 * sleep/timeout-accumulation statements are elided in this view.
 */
323 mbox_wait(struct mbox *mbox, int devid, uint32_t rst_timo)
325 volatile struct mbox_dev *mdev = &mbox->dev[devid];
326 uint32_t timeout = 0, sleep = 1;
328 rst_timo = rst_timo * 1000; /* Milli seconds to micro seconds */
329 while (mdev->num_msgs > mdev->msgs_acked) {
332 if (timeout >= rst_timo) {
333 struct mbox_hdr *tx_hdr =
334 (struct mbox_hdr *)((uintptr_t)mdev->mbase +
336 struct mbox_hdr *rx_hdr =
337 (struct mbox_hdr *)((uintptr_t)mdev->mbase +
340 plt_err("MBOX[devid: %d] message wait timeout %d, "
341 "num_msgs: %d, msgs_acked: %d "
342 "(tx/rx num_msgs: %d/%d), msg_size: %d, "
344 devid, timeout, mdev->num_msgs,
345 mdev->msgs_acked, tx_hdr->num_msgs,
346 rx_hdr->num_msgs, mdev->msg_size,
/* Wait up to `tmo` ms for devid's response.  PF<->VF mailboxes get the
 * timeout extended (extension statement elided in this view) to cover
 * the extra round-trip latency.  From interrupt context the IRQ status
 * register is polled (mbox_poll); otherwise we wait on the acked
 * counter (mbox_wait).
 */
357 mbox_wait_for_rsp_tmo(struct mbox *mbox, int devid, uint32_t tmo)
359 struct mbox_dev *mdev = &mbox->dev[devid];
362 /* Sync with mbox region */
365 if (mbox->trigger == RVU_PF_VFX_PFVF_MBOX1 ||
366 mbox->trigger == RVU_PF_VFX_PFVF_MBOX0) {
367 /* In case of VF, Wait a bit more to account round trip delay */
372 if (plt_thread_is_intr())
373 rc = mbox_poll(mbox, tmo);
375 rc = mbox_wait(mbox, devid, tmo);
385 * Wait for the mailbox response
/* Convenience wrapper: wait with the default timeout resolved at
 * mbox_init() time (mbox->rsp_tmo).
 */
388 mbox_wait_for_rsp(struct mbox *mbox, int devid)
390 return mbox_wait_for_rsp_tmo(mbox, devid, mbox->rsp_tmo);
/* Return the number of bytes still free in devid's TX queue
 * (tx_size minus queued messages minus the header area), computed
 * under the per-device spinlock.
 */
394 mbox_get_availmem(struct mbox *mbox, int devid)
396 struct mbox_dev *mdev = &mbox->dev[devid];
399 plt_spinlock_lock(&mdev->mbox_lock);
400 avail = mbox->tx_size - mdev->msg_size - msgs_offset();
401 plt_spinlock_unlock(&mdev->mbox_lock);
/* Send a READY message to the AF and process its response: reject a
 * mailbox-version mismatch, report the local pcifunc to the caller via
 * *pcifunc, and cache the rclk/sclk frequencies in the file-scope
 * globals on first success.  Error-return lines are elided in this
 * view.
 */
407 send_ready_msg(struct mbox *mbox, uint16_t *pcifunc)
409 struct ready_msg_rsp *rsp;
412 mbox_alloc_msg_ready(mbox);
414 rc = mbox_process_msg(mbox, (void *)&rsp);
418 if (rsp->hdr.ver != MBOX_VERSION) {
419 plt_err("Incompatible MBox versions(AF: 0x%04x Client: 0x%04x)",
420 rsp->hdr.ver, MBOX_VERSION);
425 *pcifunc = rsp->hdr.pcifunc;
427 /* Save rclk & sclk freq */
428 if (!dev_rclk_freq || !dev_sclk_freq) {
429 dev_rclk_freq = rsp->rclk_freq;
430 dev_sclk_freq = rsp->sclk_freq;
/* Queue a generic msg_rsp carrying MBOX_MSG_INVALID toward `pcifunc`
 * for an unrecognized request `id`.  (The line setting the response's
 * message id from `id` is elided in this view.)
 */
436 reply_invalid_msg(struct mbox *mbox, int devid, uint16_t pcifunc, uint16_t id)
440 rsp = (struct msg_rsp *)mbox_alloc_msg(mbox, devid, sizeof(*rsp));
444 rsp->hdr.sig = MBOX_RSP_SIG;
445 rsp->hdr.rc = MBOX_MSG_INVALID;
446 rsp->hdr.pcifunc = pcifunc;
453 * Convert mail box ID to name
/* Map a message id to its human-readable name by expanding the
 * message table with the M() macro below (the expansion/switch body
 * is elided in this view).
 */
456 mbox_id2name(uint16_t id)
461 #define M(_name, _id, _1, _2, _3) \
471 mbox_id2size(uint16_t id)
476 #define M(_1, _id, _2, _req_type, _3) \
478 return sizeof(struct _req_type);