1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
7 #include <rte_atomic.h>
8 #include <rte_common.h>
9 #include <rte_cycles.h>
11 #include <rte_spinlock.h>
13 #include "octeontx_mbox.h"
15 /* Mbox operation timeout in seconds */
16 #define MBOX_WAIT_TIME_SEC 3
17 #define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
19 /* Mbox channel state */
21 MBOX_CHAN_STATE_REQ = 1,
22 MBOX_CHAN_STATE_RES = 0,
25 /* Response messages */
29 MBOX_RET_INTERNAL_ERR,
35 uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
36 uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
37 uint16_t tag_own; /* Last tag which was written to own channel */
38 uint16_t domain; /* Domain */
42 static struct mbox octeontx_mbox;
45 * Structure used for mbox synchronization
46 * This structure sits at the begin of Mbox RAM and used as main
47 * synchronization point for channel communication
53 uint8_t chan_state : 1;
64 /* MBOX interface version message */
65 struct mbox_intf_ver {
71 RTE_LOG_REGISTER(octeontx_logtype_mbox, pmd.octeontx.mbox, NOTICE);
/*
 * Byte-by-byte copy helper for the device-shared mbox RAM window.
 * Both pointers are volatile-qualified, so a plain memcpy() cannot be
 * used; each byte is moved with an individual volatile access.
 * NOTE(review): extraction gap — the loop body and locals are not
 * visible here; presumably `d[i] = s[i];` — confirm against the file.
 */
74 mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
78 	for (i = 0; i < size; i++)
/*
 * Post one request into the shared mbox RAM and notify the PF.
 * The payload is copied first and the header word is written afterwards
 * with a single 64-bit store (rte_write64), so the PF sees a fully
 * formed message when chan_state flips to MBOX_CHAN_STATE_REQ.
 * Caller must hold m->lock (see mbox_send()).
 */
83 mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
84 		const void *txmsg, uint16_t txsize)
86 	struct mbox_ram_hdr old_hdr;
87 	struct mbox_ram_hdr new_hdr = { {0} };
	/* Header sits at the base of the mbox RAM; payload follows it. */
88 	uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
89 	uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
92 	 * Initialize the channel with the tag left by last send.
93 	 * On success full mbox send complete, PF increments the tag by one.
94 	 * The sender can validate integrity of PF message with this scheme
	/* Read back the last tag and advance to the next even value; the
	 * odd/even split distinguishes VF-written from PF-written tags. */
96 	old_hdr.u64 = rte_read64(ram_mbox_hdr);
97 	m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
	/* Copy the request payload before publishing the header. */
101 	mbox_msgcpy(ram_mbox_msg, txmsg, txsize);
103 	/* Prepare new hdr */
104 	new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
105 	new_hdr.coproc = hdr->coproc;
106 	new_hdr.msg = hdr->msg;
107 	new_hdr.vfid = hdr->vfid;
108 	new_hdr.tag = m->tag_own;
109 	new_hdr.len = txsize;
111 	/* Write the msg header */
112 	rte_write64(new_hdr.u64, ram_mbox_hdr);
114 	/* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
115 	rte_write64(0, m->reg);
/*
 * Poll the mbox RAM header until the PF flips chan_state back to
 * MBOX_CHAN_STATE_RES, then validate the response and copy out up to
 * rxsize bytes of payload.  Caller must hold m->lock.
 * NOTE(review): extraction gap — the polling loop body, delay call,
 * timeout handling and return statements are not visible here; the
 * visible error path logs tag/coproc/msg details before returning.
 */
119 mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
120 		void *rxmsg, uint16_t rxsize)
124 	struct mbox_ram_hdr rx_hdr;
125 	uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
126 	uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
128 	/* Wait for response */
	/* Budget: MBOX_WAIT_TIME_SEC worth of poll iterations. */
129 	wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
132 		rx_hdr.u64 = rte_read64(ram_mbox_hdr);
133 		if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
	/* Propagate the PF's result code to the caller's header. */
138 	hdr->res_code = rx_hdr.res_code;
	/* Tag mismatch means the response belongs to some other request. */
148 	if (m->tag_own != rx_hdr.tag) {
153 	/* PF nacked the msg */
154 	if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
	/* Clamp the copy to the caller's buffer size. */
159 	len = RTE_MIN(rx_hdr.len, rxsize);
161 		mbox_msgcpy(rxmsg, ram_mbox_msg, len);
168 	mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
167 			m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
/*
 * Validate arguments, then perform one request/response transaction
 * under m->lock: send the request and block-poll for the PF's answer.
 * Rejects use before both set_ram_mbox_base() and set_reg() have run
 * (init_once == 0) and payloads larger than the RAM mbox window.
 */
173 mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
174 		uint16_t txsize, void *rxmsg, uint16_t rxsize)
178 	if (m->init_once == 0 || hdr == NULL ||
179 			txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
180 		mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
181 				m->init_once, hdr, txsize, rxsize);
	/* Serialize the whole request/response pair on the channel. */
185 	rte_spinlock_lock(&m->lock);
187 	mbox_send_request(m, hdr, txmsg, txsize);
188 	res = mbox_wait_response(m, hdr, rxmsg, rxsize);
190 	rte_spinlock_unlock(&m->lock);
/*
 * Record the base address of the shared mbox RAM for the global mbox.
 * If the doorbell register was already set (m->reg != NULL), the mbox
 * becomes fully initialized here (lock init; presumably init_once is
 * also set in the non-visible lines — confirm against the file).
 */
195 octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base, uint16_t domain)
197 	struct mbox *m = &octeontx_mbox;
202 	if (ram_mbox_base == NULL) {
203 		mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base);
207 	m->ram_mbox_base = ram_mbox_base;
	/* Both halves configured -> finish channel setup. */
209 	if (m->reg != NULL) {
210 		rte_spinlock_init(&m->lock);
/*
 * Record the PF-interrupt doorbell register for the global mbox.
 * Mirror of octeontx_mbox_set_ram_mbox_base(): when the RAM base is
 * already known, complete initialization of the channel.
 */
219 octeontx_mbox_set_reg(uint8_t *reg, uint16_t domain)
221 	struct mbox *m = &octeontx_mbox;
227 		mbox_log_err("Invalid reg=%p", reg);
	/* Both halves configured -> finish channel setup. */
233 	if (m->ram_mbox_base != NULL) {
234 		rte_spinlock_init(&m->lock);
/*
 * Public entry point: send a mbox request on the global channel.
 * Only the primary process may talk to the PF; secondaries are
 * rejected (return value of the non-visible branch not shown here).
 */
243 octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
244 		uint16_t txlen, void *rxdata, uint16_t rxlen)
246 	struct mbox *m = &octeontx_mbox;
	/* The RAM header must pack into exactly one 64-bit word. */
248 	RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
249 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
252 	return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
/*
 * Ask the resource manager (NO_COPROC/RM_START_APP) to start this
 * application's domain.  No payload is exchanged in either direction;
 * the error path logs both the transport result and the PF res_code.
 */
256 octeontx_start_domain(void)
258 	struct octeontx_mbox_hdr hdr = {0};
259 	int result = -EINVAL;
261 	hdr.coproc = NO_COPROC;
262 	hdr.msg = RM_START_APP;
264 	result = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
266 		mbox_log_err("Could not start domain. Err=%d. FuncErr=%d\n",
267 			     result, hdr.res_code);
/*
 * Exchange interface versions with the kernel/PF side
 * (NO_COPROC/RM_INTERFACE_VERSION): send the application's version,
 * receive the kernel's, report it back through *intf_ver, and fail
 * when platform/major/minor do not match exactly.
 * A short reply (result != sizeof(kernel_intf_ver)) is treated as a
 * transport error.
 */
275 octeontx_check_mbox_version(struct mbox_intf_ver *app_intf_ver,
276 			    struct mbox_intf_ver *intf_ver)
278 	struct mbox_intf_ver kernel_intf_ver = {0};
279 	struct octeontx_mbox_hdr hdr = {0};
283 	hdr.coproc = NO_COPROC;
284 	hdr.msg = RM_INTERFACE_VERSION;
286 	result = octeontx_mbox_send(&hdr, app_intf_ver,
287 				    sizeof(struct mbox_intf_ver),
288 				    &kernel_intf_ver, sizeof(kernel_intf_ver));
	/* On success mbox_send returns the received payload length. */
289 	if (result != sizeof(kernel_intf_ver)) {
290 		mbox_log_err("Could not send interface version. Err=%d. FuncErr=%d\n",
291 			     result, hdr.res_code);
	/* Always report the kernel's version, even on mismatch. */
296 	*intf_ver = kernel_intf_ver;
298 	if (app_intf_ver->platform != kernel_intf_ver.platform ||
299 			app_intf_ver->major != kernel_intf_ver.major ||
300 			app_intf_ver->minor != kernel_intf_ver.minor)
/*
 * One-time mbox bring-up: start the application's domain, then verify
 * that the kernel's mbox interface version matches the version this
 * PMD was built against, logging both on mismatch.
 * NOTE(review): extraction gap — the MBOX_INTERFACE_VERSION field
 * initializers and the early-return/guard lines are not visible here.
 */
307 octeontx_mbox_init(void)
309 	struct mbox_intf_ver MBOX_INTERFACE_VERSION = {
314 	struct mbox_intf_ver rm_intf_ver = {0};
315 	struct mbox *m = &octeontx_mbox;
321 	ret = octeontx_start_domain();
327 	ret = octeontx_check_mbox_version(&MBOX_INTERFACE_VERSION,
330 		mbox_log_err("MBOX version: Kernel(%d.%d.%d) != DPDK(%d.%d.%d)",
331 			     rm_intf_ver.platform, rm_intf_ver.major,
332 			     rm_intf_ver.minor, MBOX_INTERFACE_VERSION.platform,
333 			     MBOX_INTERFACE_VERSION.major,
334 			     MBOX_INTERFACE_VERSION.minor);
346 octeontx_get_global_domain(void)
348 struct mbox *m = &octeontx_mbox;