/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
10 #include <rte_atomic.h>
11 #include <rte_cycles.h>
13 #include "otx2_mbox.h"
16 #define RVU_AF_AFPF_MBOX0 (0x02000)
17 #define RVU_AF_AFPF_MBOX1 (0x02008)
19 #define RVU_PF_PFAF_MBOX0 (0xC00)
20 #define RVU_PF_PFAF_MBOX1 (0xC08)
22 #define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
23 #define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
25 #define RVU_VF_VFPF_MBOX0 (0x0000)
26 #define RVU_VF_VFPF_MBOX1 (0x0008)
28 static inline uint16_t
31 return RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
35 otx2_mbox_fini(struct otx2_mbox *mbox)
44 otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
46 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
47 struct mbox_hdr *tx_hdr =
48 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
49 struct mbox_hdr *rx_hdr =
50 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
52 rte_spinlock_lock(&mdev->mbox_lock);
59 rte_spinlock_unlock(&mdev->mbox_lock);
63 otx2_mbox_init(struct otx2_mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
64 int direction, int ndevs, uint64_t intr_offset)
66 struct otx2_mbox_dev *mdev;
69 mbox->intr_offset = intr_offset;
70 mbox->reg_base = reg_base;
71 mbox->hwbase = hwbase;
76 mbox->tx_start = MBOX_DOWN_TX_START;
77 mbox->rx_start = MBOX_DOWN_RX_START;
78 mbox->tx_size = MBOX_DOWN_TX_SIZE;
79 mbox->rx_size = MBOX_DOWN_RX_SIZE;
83 mbox->tx_start = MBOX_DOWN_RX_START;
84 mbox->rx_start = MBOX_DOWN_TX_START;
85 mbox->tx_size = MBOX_DOWN_RX_SIZE;
86 mbox->rx_size = MBOX_DOWN_TX_SIZE;
88 case MBOX_DIR_AFPF_UP:
89 case MBOX_DIR_PFVF_UP:
90 mbox->tx_start = MBOX_UP_TX_START;
91 mbox->rx_start = MBOX_UP_RX_START;
92 mbox->tx_size = MBOX_UP_TX_SIZE;
93 mbox->rx_size = MBOX_UP_RX_SIZE;
95 case MBOX_DIR_PFAF_UP:
96 case MBOX_DIR_VFPF_UP:
97 mbox->tx_start = MBOX_UP_RX_START;
98 mbox->rx_start = MBOX_UP_TX_START;
99 mbox->tx_size = MBOX_UP_RX_SIZE;
100 mbox->rx_size = MBOX_UP_TX_SIZE;
108 case MBOX_DIR_AFPF_UP:
109 mbox->trigger = RVU_AF_AFPF_MBOX0;
113 case MBOX_DIR_PFAF_UP:
114 mbox->trigger = RVU_PF_PFAF_MBOX1;
118 case MBOX_DIR_PFVF_UP:
119 mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
123 case MBOX_DIR_VFPF_UP:
124 mbox->trigger = RVU_VF_VFPF_MBOX1;
131 mbox->dev = malloc(ndevs * sizeof(struct otx2_mbox_dev));
133 otx2_mbox_fini(mbox);
137 for (devid = 0; devid < ndevs; devid++) {
138 mdev = &mbox->dev[devid];
139 mdev->mbase = (void *)(mbox->hwbase + (devid * MBOX_SIZE));
140 rte_spinlock_init(&mdev->mbox_lock);
141 /* Init header to reset value */
142 otx2_mbox_reset(mbox, devid);
150 * Allocate a message response
153 otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size,
156 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
157 struct mbox_msghdr *msghdr = NULL;
159 rte_spinlock_lock(&mdev->mbox_lock);
160 size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
161 size_rsp = RTE_ALIGN(size_rsp, MBOX_MSG_ALIGN);
162 /* Check if there is space in mailbox */
163 if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset())
165 if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset())
167 if (mdev->msg_size == 0)
171 msghdr = (struct mbox_msghdr *)(((uintptr_t)mdev->mbase +
172 mbox->tx_start + msgs_offset() + mdev->msg_size));
174 /* Clear the whole msg region */
175 otx2_mbox_memset(msghdr, 0, sizeof(*msghdr) + size);
176 /* Init message header with reset values */
177 msghdr->ver = OTX2_MBOX_VERSION;
178 mdev->msg_size += size;
179 mdev->rsp_size += size_rsp;
180 msghdr->next_msgoff = mdev->msg_size + msgs_offset();
182 rte_spinlock_unlock(&mdev->mbox_lock);
189 * Send a mailbox message
192 otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
194 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
195 struct mbox_hdr *tx_hdr =
196 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
197 struct mbox_hdr *rx_hdr =
198 (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
200 /* Reset header for next messages */
201 tx_hdr->msg_size = mdev->msg_size;
204 mdev->msgs_acked = 0;
206 /* num_msgs != 0 signals to the peer that the buffer has a number of
207 * messages. So this should be written after copying txmem
209 tx_hdr->num_msgs = mdev->num_msgs;
210 rx_hdr->num_msgs = 0;
212 /* Sync mbox data into memory */
215 /* The interrupt should be fired after num_msgs is written
216 * to the shared memory
218 rte_write64(1, (volatile void *)(mbox->reg_base +
219 (mbox->trigger | (devid << mbox->tr_shift))));
224 * Wait and get mailbox response
227 otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg)
229 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
230 struct mbox_msghdr *msghdr;
234 rc = otx2_mbox_wait_for_rsp(mbox, devid);
240 offset = mbox->rx_start +
241 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
242 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
250 * Polling for given wait time to get mailbox response
253 mbox_poll(struct otx2_mbox *mbox, uint32_t wait)
255 uint32_t timeout = 0, sleep = 1;
256 uint32_t wait_us = wait * 1000;
257 uint64_t rsp_reg = 0;
260 reg_addr = mbox->reg_base + mbox->intr_offset;
262 rsp_reg = otx2_read64(reg_addr);
264 if (timeout >= wait_us)
273 /* Clear interrupt */
274 otx2_write64(rsp_reg, reg_addr);
277 otx2_mbox_reset(mbox, 0);
284 * Wait and get mailbox response with timeout
287 otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
290 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
291 struct mbox_msghdr *msghdr;
295 rc = otx2_mbox_wait_for_rsp_tmo(mbox, devid, tmo);
301 offset = mbox->rx_start +
302 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
303 msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
311 mbox_wait(struct otx2_mbox *mbox, int devid, uint32_t rst_timo)
313 volatile struct otx2_mbox_dev *mdev = &mbox->dev[devid];
314 uint32_t timeout = 0, sleep = 1;
316 rst_timo = rst_timo * 1000; /* Milli seconds to micro seconds */
317 while (mdev->num_msgs > mdev->msgs_acked) {
320 if (timeout >= rst_timo) {
321 struct mbox_hdr *tx_hdr =
322 (struct mbox_hdr *)((uintptr_t)mdev->mbase +
324 struct mbox_hdr *rx_hdr =
325 (struct mbox_hdr *)((uintptr_t)mdev->mbase +
328 otx2_err("MBOX[devid: %d] message wait timeout %d, "
329 "num_msgs: %d, msgs_acked: %d "
330 "(tx/rx num_msgs: %d/%d), msg_size: %d, "
332 devid, timeout, mdev->num_msgs,
333 mdev->msgs_acked, tx_hdr->num_msgs,
334 rx_hdr->num_msgs, mdev->msg_size,
345 otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo)
347 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
350 /* Sync with mbox region */
353 if (mbox->trigger == RVU_PF_VFX_PFVF_MBOX1 ||
354 mbox->trigger == RVU_PF_VFX_PFVF_MBOX0) {
355 /* In case of VF, Wait a bit more to account round trip delay */
360 if (rte_thread_is_intr())
361 rc = mbox_poll(mbox, tmo);
363 rc = mbox_wait(mbox, devid, tmo);
373 * Wait for the mailbox response
376 otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
378 return otx2_mbox_wait_for_rsp_tmo(mbox, devid, MBOX_RSP_TIMEOUT);
382 otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid)
384 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
387 rte_spinlock_lock(&mdev->mbox_lock);
388 avail = mbox->tx_size - mdev->msg_size - msgs_offset();
389 rte_spinlock_unlock(&mdev->mbox_lock);
395 otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pcifunc)
397 struct ready_msg_rsp *rsp;
400 otx2_mbox_alloc_msg_ready(mbox);
402 otx2_mbox_msg_send(mbox, 0);
403 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
407 if (rsp->hdr.ver != OTX2_MBOX_VERSION) {
408 otx2_err("Incompatible MBox versions(AF: 0x%04x DPDK: 0x%04x)",
409 rsp->hdr.ver, OTX2_MBOX_VERSION);
414 *pcifunc = rsp->hdr.pcifunc;
420 otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pcifunc,
425 rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
429 rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
430 rsp->hdr.rc = MBOX_MSG_INVALID;
431 rsp->hdr.pcifunc = pcifunc;
438 * Convert mail box ID to name
440 const char *otx2_mbox_id2name(uint16_t id)
443 #define M(_name, _id, _1, _2, _3) case _id: return # _name;
452 int otx2_mbox_id2size(uint16_t id)
455 #define M(_1, _id, _2, _req_type, _3) case _id: return sizeof(struct _req_type);