/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdlib.h> /* malloc()/free() for the per-device state array */

#include <rte_atomic.h>
#include <rte_cycles.h>

#include "otx2_mbox.h"

#define RVU_AF_AFPF_MBOX0	(0x02000)
#define RVU_AF_AFPF_MBOX1	(0x02008)

#define RVU_PF_PFAF_MBOX0	(0xC00)
#define RVU_PF_PFAF_MBOX1	(0xC08)

#define RVU_PF_VFX_PFVF_MBOX0	(0x0000)
#define RVU_PF_VFX_PFVF_MBOX1	(0x0008)

#define RVU_VF_VFPF_MBOX0	(0x0000)
#define RVU_VF_VFPF_MBOX1	(0x0008)

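/*
 * A mailbox region begins with a struct mbox_hdr; the messages follow at
 * the next MBOX_MSG_ALIGN boundary, which is what msgs_offset() below
 * returns. A doorbell is rung by writing to one of the trigger registers
 * above, offset per peer by (devid << tr_shift) in otx2_mbox_msg_send().
 */
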
static inline uint16_t
msgs_offset(void)
{
	return RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
}

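/* Tear down a mailbox: release the per-device state allocated by init */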
void
otx2_mbox_fini(struct otx2_mbox *mbox)
{
	free(mbox->dev);
	mbox->dev = NULL;
}

void
otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
	struct mbox_hdr *rx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);

	rte_spinlock_lock(&mdev->mbox_lock);
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->msg_size = 0;
	tx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	rte_spinlock_unlock(&mdev->mbox_lock);
}

int
otx2_mbox_init(struct otx2_mbox *mbox, uintptr_t hwbase,
	       uintptr_t reg_base, int direction, int ndevs)
{
	struct otx2_mbox_dev *mdev;
	int devid;

	mbox->reg_base = reg_base;
	mbox->hwbase = hwbase;

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size = MBOX_DOWN_TX_SIZE;
		mbox->rx_size = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		/* Responder's TX region is the initiator's RX and vice versa */
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size = MBOX_DOWN_RX_SIZE;
		mbox->rx_size = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size = MBOX_UP_TX_SIZE;
		mbox->rx_size = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size = MBOX_UP_RX_SIZE;
		mbox->rx_size = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->dev = malloc(ndevs * sizeof(struct otx2_mbox_dev));
	if (!mbox->dev) {
		otx2_mbox_fini(mbox);
		return -ENOMEM;
	}
	mbox->ndevs = ndevs;
	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		mdev->mbase = (void *)(mbox->hwbase + (devid * MBOX_SIZE));
		rte_spinlock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}

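/*
 * Minimal usage sketch (bar4_base/bar2_base are hypothetical names; in the
 * real driver both addresses come from the device's PCI BAR mappings):
 *
 *	struct otx2_mbox mbox;
 *
 *	rc = otx2_mbox_init(&mbox, bar4_base, bar2_base, MBOX_DIR_PFAF, 1);
 *	if (rc < 0)
 *		return rc;
 */
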
/**
 * @internal
 * Allocate a message response
 */
struct mbox_msghdr *
otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size,
			int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	rte_spinlock_lock(&mdev->mbox_lock);
	size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = RTE_ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset())
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset())
		goto exit;

	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	msghdr = (struct mbox_msghdr *)(((uintptr_t)mdev->mbase +
			mbox->tx_start + msgs_offset() + mdev->msg_size));

	/* Clear the whole msg region */
	otx2_mbox_memset(msghdr, 0, sizeof(*msghdr) + size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + msgs_offset();
exit:
	rte_spinlock_unlock(&mdev->mbox_lock);

	return msghdr;
}

/**
 * @internal
 * Send a mailbox message
 */
void
otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
	struct mbox_hdr *rx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);

	/* Reset header for next messages */
	tx_hdr->msg_size = mdev->msg_size;
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* A non-zero num_msgs signals to the peer that the buffer holds
	 * messages, so it must be written only after the message payload
	 * has been copied into the TX region.
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;

	/* Sync mbox data into memory */
	rte_wmb();

	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	rte_write64(1, (volatile void *)(mbox->reg_base +
		(mbox->trigger | (devid << mbox->tr_shift))));
}

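/*
 * Worked example (given the tr_shift values set in otx2_mbox_init() above):
 * for MBOX_DIR_PFVF the trigger is RVU_PF_VFX_PFVF_MBOX0 (0x0000) with
 * tr_shift 12, so ringing devid 2 writes 1 to reg_base + (2 << 12).
 */
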
/**
 * @internal
 * Wait and get mailbox response
 */
int
otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr;
	uint64_t offset;
	int rc;

	rc = otx2_mbox_wait_for_rsp(mbox, devid);
	if (rc != 1)
		return -EIO;

	rte_rmb();

	offset = mbox->rx_start +
		RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
	if (msg != NULL)
		*msg = msghdr;

	return msghdr->rc;
}

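/*
 * Typical request/response round trip from devid 0 (a sketch; see
 * otx2_send_ready_msg() below for the real thing):
 *
 *	otx2_mbox_alloc_msg_ready(mbox);
 *	otx2_mbox_msg_send(mbox, 0);
 *	rc = otx2_mbox_get_rsp(mbox, 0, (void **)&rsp);
 */
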
/**
 * @internal
 * Wait and get mailbox response with timeout
 */
int
otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
		      uint32_t tmo)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr;
	uint64_t offset;
	int rc;

	rc = otx2_mbox_wait_for_rsp_tmo(mbox, devid, tmo);
	if (rc != 1)
		return -EIO;

	rte_rmb();

	offset = mbox->rx_start +
		RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
	if (msg != NULL)
		*msg = msghdr;

	return msghdr->rc;
}

static int
mbox_wait(struct otx2_mbox *mbox, int devid, uint32_t rst_timo)
{
	volatile struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	uint32_t timeout = 0, sleep = 1; /* in milliseconds */

	while (mdev->num_msgs > mdev->msgs_acked) {
		rte_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= rst_timo) {
			struct mbox_hdr *tx_hdr =
				(struct mbox_hdr *)((uintptr_t)mdev->mbase +
						    mbox->tx_start);
			struct mbox_hdr *rx_hdr =
				(struct mbox_hdr *)((uintptr_t)mdev->mbase +
						    mbox->rx_start);

			otx2_err("MBOX[devid: %d] message wait timeout %d, "
				 "num_msgs: %d, msgs_acked: %d "
				 "(tx/rx num_msgs: %d/%d), msg_size: %d, "
				 "rsp_size: %d",
				 devid, timeout, mdev->num_msgs,
				 mdev->msgs_acked, tx_hdr->num_msgs,
				 rx_hdr->num_msgs, mdev->msg_size,
				 mdev->rsp_size);

			return -EIO;
		}
		rte_rmb();
	}

	return 0;
}

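/*
 * Wait for the peer to ack every outstanding message; returns the number
 * of messages acked on success or a negative errno on timeout.
 */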
int
otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = 0;

	/* Sync with mbox region */
	rte_rmb();

	if (mbox->trigger == RVU_PF_VFX_PFVF_MBOX1 ||
	    mbox->trigger == RVU_PF_VFX_PFVF_MBOX0) {
		/* In the VF case, wait a bit longer to account for the
		 * round-trip delay
		 */
		tmo = tmo * 2;
	}

	/* Wait for the message */
	rc = mbox_wait(mbox, devid, tmo);
	if (rc)
		return rc;

	return mdev->msgs_acked;
}

/**
 * @internal
 * Wait for the mailbox response
 */
int
otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
	return otx2_mbox_wait_for_rsp_tmo(mbox, devid, MBOX_RSP_TIMEOUT);
}

int
otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int avail;

	rte_spinlock_lock(&mdev->mbox_lock);
	avail = mbox->tx_size - mdev->msg_size - msgs_offset();
	rte_spinlock_unlock(&mdev->mbox_lock);

	return avail;
}

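/*
 * Handshake with the AF: send the READY message, verify that both sides
 * speak the same mailbox version and report back the pcifunc identity the
 * AF assigned to this PF/VF.
 */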
int
otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pcifunc)
{
	struct ready_msg_rsp *rsp;
	int rc;

	otx2_mbox_alloc_msg_ready(mbox);

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->hdr.ver != OTX2_MBOX_VERSION) {
		otx2_err("Incompatible MBox versions (AF: 0x%04x DPDK: 0x%04x)",
			 rsp->hdr.ver, OTX2_MBOX_VERSION);
		return -EPIPE;
	}

	if (pcifunc)
		*pcifunc = rsp->hdr.pcifunc;

	return 0;
}

int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pcifunc,
		       uint16_t id)
{
	struct msg_rsp *rsp;

	rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;
	rsp->hdr.id = id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.rc = MBOX_MSG_INVALID;
	rsp->hdr.pcifunc = pcifunc;

	return 0;
}

/**
 * @internal
 * Convert mail box ID to name
 */
const char *otx2_mbox_id2name(uint16_t id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}

int otx2_mbox_id2size(uint16_t id)
{
	switch (id) {
#define M(_1, _id, _2, _req_type, _3) case _id: return sizeof(struct _req_type);
	MBOX_MESSAGES
#undef M
	default:
		return 0;
	}
}