4 * Copyright (C) Cavium networks Ltd. 2017.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Cavium networks nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <rte_atomic.h>
36 #include <rte_common.h>
37 #include <rte_cycles.h>
39 #include <rte_spinlock.h>
41 #include "ssovf_evdev.h"
/*
 * Mailbox constants and shared state for VF->PF communication.
 * NOTE(review): enclosing enum/struct braces are elided in this view;
 * comments describe only what the visible lines establish.
 */
43 /* Mbox operation timeout in seconds */
44 #define MBOX_WAIT_TIME_SEC 3
45 #define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
47 /* Mbox channel state */
49 MBOX_CHAN_STATE_REQ = 1, /* VF has posted a request (set in mbox_send_request) */
50 MBOX_CHAN_STATE_RES = 0, /* PF response ready (polled for in mbox_wait_response) */
53 /* Response messages */
57 MBOX_RET_INTERNAL_ERR, /* any value != MBOX_RET_SUCCESS is treated as a NACK */
62 uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
63 uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
64 uint16_t tag_own; /* Last tag which was written to own channel */
/* Process-wide singleton mbox; lazily initialized by mbox_setup(). */
68 static struct mbox octeontx_mbox;
71 * Structure used for mbox synchronization
72 * This structure sits at the begin of Mbox RAM and used as main
73 * synchronization point for channel communication
79 uint8_t chan_state : 1; /* one of MBOX_CHAN_STATE_REQ/RES */
/*
 * Write a request (header + payload) into the shared RAM mailbox and ring
 * the PF doorbell register.
 *
 * m      - initialized mbox (ram_mbox_base and reg valid; caller holds m->lock)
 * hdr    - request descriptor: coproc, msg and vfid are copied into the RAM header
 * txmsg  - payload copied after the RAM header; may presumably be NULL when
 *          txsize is 0 -- TODO confirm (the NULL check is not visible here)
 * txsize - payload length; caller (mbox_send) bounds it to MAX_RAM_MBOX_LEN
 *
 * NOTE(review): some body lines are elided in this view; ordering of the
 * visible MMIO stores (payload, then header, then doorbell) is significant.
 */
91 mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
92 const void *txmsg, uint16_t txsize)
94 struct mbox_ram_hdr old_hdr;
95 struct mbox_ram_hdr new_hdr = { {0} };
96 uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
97 uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
100 * Initialize the channel with the tag left by last send.
101 * On success full mbox send complete, PF increments the tag by one.
102 * The sender can validate integrity of PF message with this scheme
104 old_hdr.u64 = rte_read64(ram_mbox_hdr);
/* Derive the next request tag: skip past the PF's +1 and force even. */
105 m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
/* Copy payload before publishing the header so PF never sees a stale body. */
109 memcpy(ram_mbox_msg, txmsg, txsize);
111 /* Prepare new hdr */
112 new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
113 new_hdr.coproc = hdr->coproc;
114 new_hdr.msg = hdr->msg;
115 new_hdr.vfid = hdr->vfid;
116 new_hdr.tag = m->tag_own;
117 new_hdr.len = txsize;
119 /* Write the msg header */
120 rte_write64(new_hdr.u64, ram_mbox_hdr);
122 /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
123 rte_write64(0, m->reg);
/*
 * Poll the RAM mailbox header until the PF flips chan_state to RES (or the
 * wait budget expires), then validate the response and copy the payload out.
 *
 * hdr    - res_code is filled in from the PF's response header
 * rxmsg  - destination for the response payload (truncated to rxsize)
 * rxsize - capacity of rxmsg
 * Returns response length on success, negative on failure (error paths are
 * elided from this view -- the trailing log covers the failure exit).
 *
 * NOTE(review): the polling loop body/delay between iterations is not visible
 * here; the budget of SEC*1000*10 iterations presumably pairs with a ~100us
 * delay per iteration to realize MBOX_WAIT_TIME_SEC -- TODO confirm.
 */
127 mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
128 void *rxmsg, uint16_t rxsize)
132 struct mbox_ram_hdr rx_hdr;
133 uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
134 uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
136 /* Wait for response */
137 wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
140 rx_hdr.u64 = rte_read64(ram_mbox_hdr);
141 if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
/* Propagate the PF result code to the caller even before validation. */
146 hdr->res_code = rx_hdr.res_code;
/* On success PF echoes our tag + 1; send_request stored tag_own accordingly,
 * so any mismatch means the response belongs to a different request. */
156 if (m->tag_own != rx_hdr.tag) {
161 /* PF nacked the msg */
162 if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
/* Never copy more than the caller's buffer can hold. */
167 len = RTE_MIN(rx_hdr.len, rxsize);
169 memcpy(rxmsg, ram_mbox_msg, len);
/* Common failure exit: log both expected and received tag plus result. */
174 ssovf_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
175 m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
/*
 * Serialized request/response transaction: validate arguments, then perform
 * send + wait under the mbox spinlock so only one transaction uses the single
 * RAM channel at a time. Returns mbox_wait_response()'s result (error return
 * for the validation branch is elided from this view).
 */
181 mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
182 uint16_t txsize, void *rxmsg, uint16_t rxsize)
/* Reject use before mbox_setup(), a missing header, or payloads that would
 * overflow the half-BAR RAM mailbox (MAX_RAM_MBOX_LEN). */
186 if (m->init_once == 0 || hdr == NULL ||
187 txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
188 ssovf_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
189 m->init_once, hdr, txsize, rxsize);
193 rte_spinlock_lock(&m->lock);
195 mbox_send_request(m, hdr, txmsg, txsize);
196 res = mbox_wait_response(m, hdr, rxmsg, rxsize);
198 rte_spinlock_unlock(&m->lock);
/*
 * One-time lazy initialization of the mbox singleton: init the lock and
 * resolve the RAM mailbox base (SSO HWS 0, BAR 4) and the PF doorbell
 * register (SSO group 0, BAR 0 + SSO_VHGRP_PF_MBOX(1)).
 * NOTE(review): the line setting m->init_once on success is elided from
 * this view; the init_once==0 guard implies it is set somewhere below.
 */
203 mbox_setup(struct mbox *m)
205 if (unlikely(m->init_once == 0)) {
206 rte_spinlock_init(&m->lock);
207 m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
208 m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
209 m->reg += SSO_VHGRP_PF_MBOX(1);
/* Either BAR lookup may fail; bail out rather than dereference NULL later. */
211 if (m->ram_mbox_base == NULL || m->reg == NULL) {
212 ssovf_log_err("Invalid ram_mbox_base=%p or reg=%p",
213 m->ram_mbox_base, m->reg);
/*
 * Public entry point for VF->PF mailbox transactions.
 * Restricted to the primary process (the mbox RAM/doorbell mapping is
 * process-local) and fails if lazy setup fails; otherwise delegates to
 * mbox_send(). The error-return line for the guard is elided from this view.
 */
222 octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
223 uint16_t txlen, void *rxdata, uint16_t rxlen)
225 struct mbox *m = &octeontx_mbox;
/* The RAM header is accessed as a single 64-bit MMIO word; keep it 8 bytes. */
227 RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
228 if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m))
231 return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);