1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
7 #include <rte_atomic.h>
8 #include <rte_common.h>
9 #include <rte_cycles.h>
11 #include <rte_spinlock.h>
13 #include "octeontx_mbox.h"
15 /* Mbox operation timeout in seconds */
16 #define MBOX_WAIT_TIME_SEC 3
17 #define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
19 /* Mbox channel state */
21 MBOX_CHAN_STATE_REQ = 1,
22 MBOX_CHAN_STATE_RES = 0,
25 /* Response messages */
29 MBOX_RET_INTERNAL_ERR,
35 uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
36 uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
37 uint16_t tag_own; /* Last tag which was written to own channel */
41 static struct mbox octeontx_mbox;
44 * Structure used for mbox synchronization
45 * This structure sits at the begin of Mbox RAM and used as main
46 * synchronization point for channel communication
52 uint8_t chan_state : 1;
63 /* MBOX interface version message */
64 struct mbox_intf_ver {
70 int octeontx_logtype_mbox;
72 RTE_INIT(otx_init_log)
74 octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox");
75 if (octeontx_logtype_mbox >= 0)
76 rte_log_set_level(octeontx_logtype_mbox, RTE_LOG_NOTICE);
/*
 * Byte-wise copy between mbox RAM and a local buffer.
 * volatile-qualified so the compiler cannot widen or elide accesses to the
 * shared device RAM; memcpy() would not honor that.
 */
static inline void
mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
{
	uint16_t i;

	for (i = 0; i < size; i++)
		d[i] = s[i];
}
89 mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
90 const void *txmsg, uint16_t txsize)
92 struct mbox_ram_hdr old_hdr;
93 struct mbox_ram_hdr new_hdr = { {0} };
94 uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
95 uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
98 * Initialize the channel with the tag left by last send.
99 * On success full mbox send complete, PF increments the tag by one.
100 * The sender can validate integrity of PF message with this scheme
102 old_hdr.u64 = rte_read64(ram_mbox_hdr);
103 m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
107 mbox_msgcpy(ram_mbox_msg, txmsg, txsize);
109 /* Prepare new hdr */
110 new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
111 new_hdr.coproc = hdr->coproc;
112 new_hdr.msg = hdr->msg;
113 new_hdr.vfid = hdr->vfid;
114 new_hdr.tag = m->tag_own;
115 new_hdr.len = txsize;
117 /* Write the msg header */
118 rte_write64(new_hdr.u64, ram_mbox_hdr);
120 /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
121 rte_write64(0, m->reg);
125 mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
126 void *rxmsg, uint16_t rxsize)
130 struct mbox_ram_hdr rx_hdr;
131 uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
132 uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
134 /* Wait for response */
135 wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
138 rx_hdr.u64 = rte_read64(ram_mbox_hdr);
139 if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
144 hdr->res_code = rx_hdr.res_code;
154 if (m->tag_own != rx_hdr.tag) {
159 /* PF nacked the msg */
160 if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
165 len = RTE_MIN(rx_hdr.len, rxsize);
167 mbox_msgcpy(rxmsg, ram_mbox_msg, len);
172 mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
173 m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
179 mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
180 uint16_t txsize, void *rxmsg, uint16_t rxsize)
184 if (m->init_once == 0 || hdr == NULL ||
185 txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
186 mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
187 m->init_once, hdr, txsize, rxsize);
191 rte_spinlock_lock(&m->lock);
193 mbox_send_request(m, hdr, txmsg, txsize);
194 res = mbox_wait_response(m, hdr, rxmsg, rxsize);
196 rte_spinlock_unlock(&m->lock);
201 octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base)
203 struct mbox *m = &octeontx_mbox;
208 if (ram_mbox_base == NULL) {
209 mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base);
213 m->ram_mbox_base = ram_mbox_base;
215 if (m->reg != NULL) {
216 rte_spinlock_init(&m->lock);
224 octeontx_mbox_set_reg(uint8_t *reg)
226 struct mbox *m = &octeontx_mbox;
232 mbox_log_err("Invalid reg=%p", reg);
238 if (m->ram_mbox_base != NULL) {
239 rte_spinlock_init(&m->lock);
247 octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
248 uint16_t txlen, void *rxdata, uint16_t rxlen)
250 struct mbox *m = &octeontx_mbox;
252 RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
253 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
256 return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
260 octeontx_start_domain(void)
262 struct octeontx_mbox_hdr hdr = {0};
263 int result = -EINVAL;
265 hdr.coproc = NO_COPROC;
266 hdr.msg = RM_START_APP;
268 result = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
270 mbox_log_err("Could not start domain. Err=%d. FuncErr=%d\n",
271 result, hdr.res_code);
279 octeontx_check_mbox_version(struct mbox_intf_ver app_intf_ver,
280 struct mbox_intf_ver *intf_ver)
282 struct mbox_intf_ver kernel_intf_ver = {0};
283 struct octeontx_mbox_hdr hdr = {0};
287 hdr.coproc = NO_COPROC;
288 hdr.msg = RM_INTERFACE_VERSION;
290 result = octeontx_mbox_send(&hdr, &app_intf_ver, sizeof(app_intf_ver),
291 &kernel_intf_ver, sizeof(kernel_intf_ver));
292 if (result != sizeof(kernel_intf_ver)) {
293 mbox_log_err("Could not send interface version. Err=%d. FuncErr=%d\n",
294 result, hdr.res_code);
299 *intf_ver = kernel_intf_ver;
301 if (app_intf_ver.platform != kernel_intf_ver.platform ||
302 app_intf_ver.major != kernel_intf_ver.major ||
303 app_intf_ver.minor != kernel_intf_ver.minor)
310 octeontx_mbox_init(void)
312 const struct mbox_intf_ver MBOX_INTERFACE_VERSION = {
317 struct mbox_intf_ver rm_intf_ver = {0};
318 struct mbox *m = &octeontx_mbox;
324 ret = octeontx_start_domain();
330 ret = octeontx_check_mbox_version(MBOX_INTERFACE_VERSION,
333 mbox_log_err("MBOX version: Kernel(%d.%d.%d) != DPDK(%d.%d.%d)",
334 rm_intf_ver.platform, rm_intf_ver.major,
335 rm_intf_ver.minor, MBOX_INTERFACE_VERSION.platform,
336 MBOX_INTERFACE_VERSION.major,
337 MBOX_INTERFACE_VERSION.minor);