#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
+#include "ecore_sp_commands.h"
#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
OFFSETOF(struct public_drv_mb, _field))
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
- DRV_ID_PDA_COMP_VER_SHIFT)
+ DRV_ID_PDA_COMP_VER_OFFSET)
-#define MCP_BYTES_PER_MBIT_SHIFT 17
+#define MCP_BYTES_PER_MBIT_OFFSET 17
#ifndef ASIC_ONLY
static int loaded;
delay = EMUL_MCP_RESP_ITER_US;
#endif
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+ return ECORE_ABORTED;
+ }
+
/* Ensure that only a single thread is accessing the mailbox */
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
(p_mb_params->cmd | seq_num), p_mb_params->param);
}
+/* Set or clear the mailbox blocking flag (mcp_info->b_block_cmd).
+ * While the flag is set, callers refuse to send further mailbox commands
+ * to the MFW (used when the MFW is unresponsive or deliberately halted).
+ * Logs the "Block"/"Unblock" transition.
+ */
+static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
+ bool block_cmd)
+{
+ p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+ DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+ block_cmd ? "Block" : "Unblock");
+}
+
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params,
ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
return ECORE_AGAIN;
}
return ECORE_INVAL;
}
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_ABORTED;
+ }
+
return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
delay);
}
load_req.drv_ver_0 = p_in_params->drv_ver_0;
load_req.drv_ver_1 = p_in_params->drv_ver_1;
load_req.fw_ver = p_in_params->fw_ver;
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
- p_in_params->drv_role);
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
- p_in_params->timeout_val);
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
- p_in_params->force_cmd);
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
- p_in_params->avoid_eng_reset);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
DRV_ID_MCP_HSI_VER_CURRENT :
- (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
mb_params.param,
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
load_req.drv_ver_0, load_req.drv_ver_1,
load_req.fw_ver, load_req.misc0,
- ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
- ECORE_MFW_GET_FIELD(load_req.misc0,
- LOAD_REQ_LOCK_TO),
- ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
- ECORE_MFW_GET_FIELD(load_req.misc0,
- LOAD_REQ_FLAGS0));
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS) {
"Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
load_rsp.drv_ver_0, load_rsp.drv_ver_1,
load_rsp.fw_ver, load_rsp.misc0,
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
- ECORE_MFW_GET_FIELD(load_rsp.misc0,
- LOAD_RSP_FLAGS0));
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
p_out_params->exist_fw_ver = load_rsp.fw_ver;
p_out_params->exist_drv_role =
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
p_out_params->mfw_hsi_ver =
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
p_out_params->drv_exists =
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
LOAD_RSP_FLAGS0_DRV_EXISTS;
}
return ECORE_SUCCESS;
}
-static void ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
- enum ecore_drv_role drv_role,
+static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
u8 *p_mfw_drv_role)
{
switch (drv_role) {
ECORE_LOAD_REQ_FORCE_ALL,
};
-static void ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
- enum ecore_load_req_force force_cmd,
+static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
u8 *p_mfw_force_cmd)
{
switch (force_cmd) {
in_params.drv_ver_0 = ECORE_VERSION;
in_params.drv_ver_1 = ecore_get_config_bitmap();
in_params.fw_ver = STORM_FW_VERSION;
- ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
in_params.drv_role = mfw_drv_role;
in_params.timeout_val = p_params->timeout_val;
- ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
- &mfw_force_cmd);
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
in_params.avoid_eng_reset = p_params->avoid_eng_reset;
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
- ecore_get_mfw_force_cmd(p_hwfn,
- ECORE_LOAD_REQ_FORCE_ALL,
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
&mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
OFFSETOF(struct public_port,
transceiver_data)));
- transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
+ transceiver_state = GET_MFW_FIELD(transceiver_state,
+ ETH_TRANSCEIVER_STATE);
if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port, eee_status));
p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
- val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_SHIFT;
+ val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
if (val & EEE_1G_ADV)
p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
if (val & EEE_10G_ADV)
p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
- val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_SHIFT;
+ val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
if (val & EEE_1G_ADV)
p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
if (val & EEE_10G_ADV)
if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
- EEE_TX_TIMER_USEC_SHIFT) &
+ EEE_TX_TIMER_USEC_OFFSET) &
EEE_TX_TIMER_USEC_MASK;
}
*/
p_info->bandwidth_min = (p_shmem_info->config &
FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
+ FUNC_MF_CFG_MIN_BW_OFFSET;
if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
DP_INFO(p_hwfn,
"bandwidth minimum out of bounds [%02x]. Set to 1\n",
p_info->bandwidth_max = (p_shmem_info->config &
FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
+ FUNC_MF_CFG_MAX_BW_OFFSET;
if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
DP_INFO(p_hwfn,
"bandwidth maximum out of bounds [%02x]. Set to 100\n",
¶m);
}
-static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
/* A single notification should be sent to upper driver in CMT mode */
if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_FAILURE_DETECTED:
- ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ ecore_mcp_handle_fan_failure(p_hwfn);
break;
case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
return &p_hwfn->mcp_info->func_info;
}
-enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_mcp_nvm_params *params)
-{
- enum _ecore_status_t rc;
-
- switch (params->type) {
- case ECORE_MCP_NVM_RD:
- rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- ¶ms->nvm_common.resp,
- ¶ms->nvm_common.param,
- params->nvm_rd.buf_size,
- params->nvm_rd.buf);
- break;
- case ECORE_MCP_CMD:
- rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- ¶ms->nvm_common.resp,
- ¶ms->nvm_common.param);
- break;
- case ECORE_MCP_NVM_WR:
- rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- ¶ms->nvm_common.resp,
- ¶ms->nvm_common.param,
- params->nvm_wr.buf_size,
- params->nvm_wr.buf);
- break;
- default:
- rc = ECORE_NOTIMPL;
- break;
- }
- return rc;
-}
-
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 personalities)
{
flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
- MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
- flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
*p_flash_size = flash_size;
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 vf_id, u8 num)
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
{
u32 resp = 0, param = 0, rc_param = 0;
enum _ecore_status_t rc;
return ECORE_SUCCESS;
num *= p_hwfn->p_dev->num_hwfns;
- param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
- param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
return rc;
}
+/* AH-specific VF MSI-X configuration: requests 'num' MSI-X interrupts for
+ * this PF's VFs via the DRV_MSG_CODE_CFG_PF_VFS_MSIX mailbox command
+ * (no per-VF id on AH, unlike the BB variant).
+ */
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 num)
+{
+ u32 resp = 0, param = num, rc_param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
+ param, &resp, &rc_param);
+
+ /* NOTE(review): if ecore_mcp_cmd() itself failed, resp remains 0, so
+ * this branch is taken and the original rc is overwritten with
+ * ECORE_INVAL — the underlying mailbox error code is lost.
+ */
+ if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
+ rc = ECORE_INVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts for VFs\n",
+ num);
+ }
+
+ return rc;
+}
+
+/* Configure MSI-X for a VF: dispatch to the chip-specific implementation.
+ * BB takes a per-VF id; AH configures the PF's VFs as a group, so vf_id is
+ * intentionally unused on that path.
+ */
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
+{
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
+ else
+ return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
+}
+
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_version *p_ver)
return rc;
}
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define ECORE_MCP_HALT_SLEEP_MS 10
+#define ECORE_MCP_HALT_MAX_RETRIES 10
+
+/* Halt the MCP: send DRV_MSG_CODE_MCP_HALT, then poll MCP_REG_CPU_STATE
+ * for the soft-halted bit (up to HALT_MAX_RETRIES * HALT_SLEEP_MS, ~100ms).
+ * On success, block further mailbox commands since the MFW can no longer
+ * service them.
+ */
enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
enum _ecore_status_t rc;
- u32 resp = 0, param = 0;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
¶m);
+/* NOTE(review): "¶m" in the context line above looks like a mis-encoded
+ * "&param" — verify against the pristine patch before applying.
+ */
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
- return rc;
+ /* Wait for the MCP to acknowledge the halt via CPU_STATE. */
+ do {
+ OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return ECORE_BUSY;
+ }
+
+ /* MFW is halted - refuse further mailbox traffic until resume. */
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
+
+ return ECORE_SUCCESS;
}
+#define ECORE_MCP_RESUME_SLEEP_MS 10
+
+/* Resume a previously halted MCP: clear CPU_STATE, drop the SOFT_HALT bit
+ * in CPU_MODE, then re-read CPU_STATE after a short sleep to confirm the
+ * MCP actually left the halted state. On success, unblock mailbox commands.
+ */
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 value, cpu_mode;
+ u32 cpu_mode, cpu_state;
ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
- value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
- value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
- ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+
+ /* Give the MCP time to come out of halt before checking CPU_STATE. */
+ OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
- return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return ECORE_BUSY;
+ }
+
+ /* MFW is running again - allow mailbox commands. */
+ ecore_mcp_cmd_set_blocking(p_hwfn, false);
+
+ return ECORE_SUCCESS;
}
enum _ecore_status_t
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
u32 bytes_left, offset, bytes_to_copy, buf_size;
- struct ecore_mcp_nvm_params params;
+ u32 nvm_offset, resp, param;
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc = ECORE_SUCCESS;
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
bytes_left = len;
offset = 0;
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
while (bytes_left > 0) {
bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
MCP_DRV_NVM_BUF_LEN);
- params.nvm_common.offset = (addr + offset) |
- (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
- params.nvm_rd.buf = (u32 *)(p_buf + offset);
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
- if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
- FW_MSG_CODE_NVM_OK)) {
- DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ nvm_offset = (addr + offset) | (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ nvm_offset, &resp, ¶m, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_NVM_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm read failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
break;
}
* isn't preemptible. Sleep a bit to prevent CPU hogging.
*/
if (bytes_left % 0x1000 <
- (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
+ (bytes_left - buf_size) % 0x1000)
OSAL_MSLEEP(1);
- offset += *params.nvm_rd.buf_size;
- bytes_left -= *params.nvm_rd.buf_size;
+ offset += buf_size;
+ bytes_left -= buf_size;
}
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &len;
- params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
- DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
- params.nvm_common.offset = addr;
- params.nvm_rd.buf = (u32 *)p_buf;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ (cmd == ECORE_PHY_CORE_READ) ?
+ DRV_MSG_CODE_PHY_CORE_READ :
+ DRV_MSG_CODE_PHY_RAW_READ,
+ addr, &resp, ¶m, &len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
ecore_ptt_release(p_hwfn, p_ptt);
enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
+ &resp, ¶m);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
+ &resp, ¶m);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
+ u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
- u32 buf_idx, buf_size;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_WR;
- if (cmd == ECORE_PUT_FILE_DATA)
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
- else
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ switch (cmd) {
+ case ECORE_PUT_FILE_DATA:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+ break;
+ case ECORE_NVM_WRITE_NVRAM:
+ nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ break;
+ case ECORE_EXT_PHY_FW_UPGRADE:
+ nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
+ cmd);
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
buf_idx = 0;
while (buf_idx < len) {
buf_size = OSAL_MIN_T(u32, (len - buf_idx),
MCP_DRV_NVM_BUF_LEN);
- params.nvm_common.offset = ((buf_size <<
- DRV_MB_PARAM_NVM_LEN_SHIFT)
- | addr) + buf_idx;
- params.nvm_wr.buf_size = buf_size;
- params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
- if (rc != ECORE_SUCCESS ||
- ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
- (params.nvm_common.resp !=
- FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
- DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
+ addr) +
+ buf_idx;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
+ &resp, ¶m, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "ecore_mcp_nvm_write() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_OK &&
+ resp != FW_MSG_CODE_NVM_OK &&
+ resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm write failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ break;
+ }
/* This can be a lengthy process, and it's possible scheduler
* isn't preemptible. Sleep a bit to prevent CPU hogging.
buf_idx += buf_size;
}
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
+out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param, nvm_cmd;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_WR;
- params.nvm_wr.buf_size = len;
- params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
- DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
- params.nvm_common.offset = addr;
- params.nvm_wr.buf = (u32 *)p_buf;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
+ nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
+ DRV_MSG_CODE_PHY_RAW_WRITE;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
+ &resp, ¶m, len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
+ &resp, ¶m);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf)
{
- struct ecore_mcp_nvm_params params;
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
+ u32 resp, param;
enum _ecore_status_t rc;
- u32 bytes_left, bytes_to_copy, buf_size;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset =
- (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
- (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
addr = offset;
offset = 0;
bytes_left = len;
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
while (bytes_left > 0) {
bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
MAX_I2C_TRANSACTION_SIZE);
- params.nvm_rd.buf = (u32 *)(p_buf + offset);
- params.nvm_common.offset &=
- (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
- DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
- params.nvm_common.offset |=
- ((addr + offset) <<
- DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
- params.nvm_common.offset |=
- (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, ¶m, &buf_size,
+ (u32 *)(p_buf + offset));
+ if ((resp & FW_MSG_CODE_MASK) ==
FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
return ECORE_NODEV;
- } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+ } else if ((resp & FW_MSG_CODE_MASK) !=
FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
return ECORE_UNKNOWN_ERROR;
- offset += *params.nvm_rd.buf_size;
- bytes_left -= *params.nvm_rd.buf_size;
+ offset += buf_size;
+ bytes_left -= buf_size;
}
return ECORE_SUCCESS;
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_idx, buf_size, nvm_offset, resp, param;
enum _ecore_status_t rc;
- u32 buf_idx, buf_size;
-
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset =
- (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
- (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
- params.type = ECORE_MCP_NVM_WR;
- params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
+
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
buf_idx = 0;
while (buf_idx < len) {
buf_size = OSAL_MIN_T(u32, (len - buf_idx),
MAX_I2C_TRANSACTION_SIZE);
- params.nvm_common.offset &=
- (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
- DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
- params.nvm_common.offset |=
- ((offset + buf_idx) <<
- DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
- params.nvm_common.offset |=
- (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
- params.nvm_wr.buf_size = buf_size;
- params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((offset + buf_idx) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (buf_size <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_WRITE,
+ nvm_offset, &resp, ¶m, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if ((resp & FW_MSG_CODE_MASK) ==
FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
return ECORE_NODEV;
- } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+ } else if ((resp & FW_MSG_CODE_MASK) !=
FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
return ECORE_UNKNOWN_ERROR;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, rsp;
- drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
drv_mb_param, &rsp, gpio_val);
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, param, rsp;
- drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
- (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
+ (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
drv_mb_param, &rsp, ¶m);
u32 drv_mb_param = 0, rsp, val = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
- drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
+ drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
drv_mb_param, &rsp, &val);
return rc;
*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
- DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
+ DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
- DRV_MB_PARAM_GPIO_CTRL_SHIFT;
+ DRV_MB_PARAM_GPIO_CTRL_OFFSET;
if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
return ECORE_UNKNOWN_ERROR;
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, ¶m);
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, ¶m);
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, num_images);
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct bist_nvm_image_att *p_image_att, u32 image_index)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_size, nvm_offset, resp, param;
enum _ecore_status_t rc;
- u32 buf_size;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
- params.nvm_common.offset |= (image_index <<
- DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
-
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
- params.nvm_rd.buf = (u32 *)p_image_att;
-
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
+ nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
+ nvm_offset |= (image_index <<
+ DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ nvm_offset, &resp, ¶m, &buf_size,
+ (u32 *)p_image_att);
if (rc != ECORE_SUCCESS)
return rc;
- if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
(p_image_att->return_code != 1))
rc = ECORE_UNKNOWN_ERROR;
val = mfw_temp_info.sensor[i];
p_temp_sensor = &p_temp_info->sensors[i];
p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
- SENSOR_LOCATION_SHIFT;
+ SENSOR_LOCATION_OFFSET;
p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
- THRESHOLD_HIGH_SHIFT;
+ THRESHOLD_HIGH_OFFSET;
p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
- CRITICAL_TEMPERATURE_SHIFT;
+ CRITICAL_TEMPERATURE_OFFSET;
p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
- CURRENT_TEMP_SHIFT;
+ CURRENT_TEMP_OFFSET;
}
return ECORE_SUCCESS;
struct ecore_ptt *p_ptt,
struct ecore_mba_vers *p_mba_vers)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_size, resp, param;
enum _ecore_status_t rc;
- u32 buf_size;
- OSAL_MEM_ZERO(¶ms, sizeof(params));
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
- params.nvm_common.offset = 0;
- params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
- params.nvm_rd.buf_size = &buf_size;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
+ 0, &resp, ¶m, &buf_size,
+ &p_mba_vers->mba_vers[0]);
if (rc != ECORE_SUCCESS)
return rc;
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_NVM_OK)
+ if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
rc = ECORE_UNKNOWN_ERROR;
if (buf_size != MCP_DRV_NVM_BUF_LEN)
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION \
((ECORE_RESC_ALLOC_VERSION_MAJOR << \
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
(ECORE_RESC_ALLOC_VERSION_MINOR << \
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
struct ecore_resc_alloc_in_params {
u32 cmd;
"Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
p_in_params->cmd, p_in_params->res_id,
ecore_hw_get_resc_name(p_in_params->res_id),
- ECORE_MFW_GET_FIELD(mb_params.param,
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
- ECORE_MFW_GET_FIELD(mb_params.param,
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_in_params->resc_max_val);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
- ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
- FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
- ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
- FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_out_params->resc_num, p_out_params->resc_start,
p_out_params->vf_resc_num, p_out_params->vf_resc_start,
p_out_params->flags);
}
if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
- u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+ u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
DP_NOTICE(p_hwfn, false,
"The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
break;
}
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
return rc;
/* Analyze the response */
- p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
- RESOURCE_CMD_RSP_OWNER);
- opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+ p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
return ECORE_SUCCESS;
}
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent)
+{
+ if (p_lock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
+
+ /* Permanent resources don't require aging, and there's no
+ * point in trying to acquire them more than once since it's
+ * unexpected another entity would release them.
+ */
+ if (b_is_permanent) {
+ p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
+ } else {
+ p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+ p_lock->retry_interval =
+ ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+ p_lock->sleep_b4_retry = true;
+ }
+
+ p_lock->resource = resource;
+ }
+
+ if (p_unlock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
+ p_unlock->resource = resource;
+ }
+}
+
enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_unlock_params *p_params)
opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
: RESOURCE_OPCODE_RELEASE;
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
return rc;
/* Analyze the response */
- opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource unlock response: mcp_param 0x%08x [opcode %d]\n",