#include "ecore_sp_commands.h"
#include "ecore_cxt.h"
-#define CHIP_MCP_RESP_ITER_US 10
-#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
+#define GRCBASE_MCP 0xe00000
+#define ECORE_MCP_RESP_ITER_US 10
#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+#ifndef ASIC_ONLY
+/* Non-ASIC:
+ * The waiting interval is multiplied by 100 to reduce the impact of the
+ * built-in delay of 100usec in each ecore_rd().
+ * In addition, the number of retries is scaled so that the total wait time
+ * is 4 times longer than on ASIC.
+ */
+#define ECORE_EMUL_MCP_RESP_ITER_US (ECORE_MCP_RESP_ITER_US * 100)
+#define ECORE_EMUL_DRV_MB_MAX_RETRIES ((ECORE_DRV_MB_MAX_RETRIES / 100) * 4)
+#define ECORE_EMUL_MCP_RESET_RETRIES ((ECORE_MCP_RESET_RETRIES / 100) * 4)
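+/* With the values above, emulation polls every 1 msec (10 usec * 100), and the
+ * mailbox and reset waits total ~20 sec and ~2 sec respectively, i.e. 4 times
+ * the ASIC totals of 5 sec and 500 msec.
+ */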
+#endif
+
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
_val)
struct ecore_ptt *p_ptt)
{
struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+ u32 drv_mb_offsize, mfw_mb_offsize, val;
u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
- u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+ val = ecore_rd(p_hwfn, p_ptt, MCP_REG_CACHE_PAGING_ENABLE);
+ p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base) {
+ DP_NOTICE(p_hwfn, false,
+ "The address of the MCP scratch-pad is not configured\n");
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
- p_info->public_base = 0;
- return ECORE_INVAL;
- }
+ /* Zeroed "public_base" implies no MFW */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ DP_INFO(p_hwfn, "Emulation: Assume no MFW\n");
#endif
-
- p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
- if (!p_info->public_base)
return ECORE_INVAL;
+ }
p_info->public_base |= GRCBASE_MCP;
if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
- /* Do not free mcp_info here, since public_base indicate that
+ /* Do not free mcp_info here, since "public_base" indicates that
* the MCP is not initialized
*/
return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+ u32 prev_generic_por_0, seq, delay = ECORE_MCP_RESP_ITER_US, cnt = 0;
+ u32 retries = ECORE_MCP_RESET_RETRIES;
enum _ecore_status_t rc = ECORE_SUCCESS;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
- delay = EMUL_MCP_RESP_ITER_US;
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ delay = ECORE_EMUL_MCP_RESP_ITER_US;
+ retries = ECORE_EMUL_MCP_RESET_RETRIES;
+ }
#endif
-
if (p_hwfn->mcp_info->b_block_cmd) {
DP_NOTICE(p_hwfn, false,
"The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
/* Ensure that only a single thread is accessing the mailbox */
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ prev_generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
/* Set drv command along with the updated sequence */
ecore_mcp_reread_offsets(p_hwfn, p_ptt);
seq = ++p_hwfn->mcp_info->drv_mb_seq;
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
+ /* Give the MFW up to 500 msec (50 * 1000 * 10 usec) to resume */
do {
- /* Wait for MFW response */
OSAL_UDELAY(delay);
- /* Give the FW up to 500 second (50*1000*10usec) */
- } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
- MISCS_REG_GENERIC_POR_0)) &&
- (cnt++ < ECORE_MCP_RESET_RETRIES));
- if (org_mcp_reset_seq !=
- ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
+ prev_generic_por_0)
+ break;
+ } while (cnt++ < retries);
+
+ if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
+ prev_generic_por_0) {
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"MCP was reset after %d usec\n", cnt * delay);
} else {
return rc;
}
+#ifndef ASIC_ONLY
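+/* Emulate the MFW response to LOAD_REQ: the first function to load gets the
+ * ENGINE phase, the first on its port gets PORT, and any other gets FUNCTION.
+ * Only HSI version 1 is supported, and CMT devices always report ENGINE.
+ */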
+static void ecore_emul_mcp_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_mb_params *p_mb_params)
+{
+ if (GET_MFW_FIELD(p_mb_params->param, DRV_ID_MCP_HSI_VER) !=
+ 1 /* ECORE_LOAD_REQ_HSI_VER_1 */) {
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1;
+ return;
+ }
+
+ if (!loaded)
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
+ else if (!loaded_port[p_hwfn->port_id])
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+
+ /* On CMT, always report the ENGINE load phase */
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+ loaded++;
+ loaded_port[p_hwfn->port_id]++;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load phase: 0x%08x load cnt: 0x%x port id=%d port_load=%d\n",
+ p_mb_params->mcp_resp, loaded, p_hwfn->port_id,
+ loaded_port[p_hwfn->port_id]);
+}
+
+static void ecore_emul_mcp_unload_req(struct ecore_hwfn *p_hwfn)
+{
+ loaded--;
+ loaded_port[p_hwfn->port_id]--;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n", loaded);
+}
+
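+/* Handle mailbox commands locally when running on emulation without an MFW.
+ * A few commands are explicitly reported as not implemented; the rest are
+ * completed silently with success.
+ */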
+static enum _ecore_status_t
+ecore_emul_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_mb_params *p_mb_params)
+{
+ if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ switch (p_mb_params->cmd) {
+ case DRV_MSG_CODE_LOAD_REQ:
+ ecore_emul_mcp_load_req(p_hwfn, p_mb_params);
+ break;
+ case DRV_MSG_CODE_UNLOAD_REQ:
+ ecore_emul_mcp_unload_req(p_hwfn);
+ break;
+ case DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT:
+ case DRV_MSG_CODE_RESOURCE_CMD:
+ case DRV_MSG_CODE_MDUMP_CMD:
+ case DRV_MSG_CODE_GET_ENGINE_CONFIG:
+ case DRV_MSG_CODE_GET_PPFID_BITMAP:
+ return ECORE_NOTIMPL;
+ default:
+ break;
+ }
+
+ return ECORE_SUCCESS;
+}
+#endif
+
/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt *p_ptt)
{
u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+ u32 delay = ECORE_MCP_RESP_ITER_US;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = ECORE_EMUL_MCP_RESP_ITER_US;
+#endif
cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
- OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ OSAL_UDELAY(delay);
cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
- OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ OSAL_UDELAY(delay);
cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
DP_NOTICE(p_hwfn, false,
{
osal_size_t union_data_size = sizeof(union drv_union_data);
u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
- u32 delay = CHIP_MCP_RESP_ITER_US;
+ u32 usecs = ECORE_MCP_RESP_ITER_US;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
- delay = EMUL_MCP_RESP_ITER_US;
- /* There is a built-in delay of 100usec in each MFW response read */
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- max_retries /= 10;
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn))
+ return ecore_emul_mcp_cmd(p_hwfn, p_mb_params);
+
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ max_retries = ECORE_EMUL_DRV_MB_MAX_RETRIES;
+ usecs = ECORE_EMUL_MCP_RESP_ITER_US;
+ }
#endif
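+ /* When sleeping is allowed, poll with msec granularity while keeping the
+ * total wait time roughly the same.
+ */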
+ if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+ max_retries = DIV_ROUND_UP(max_retries, 1000);
+ usecs *= 1000;
+ }
/* MCP not initialized */
if (!ecore_mcp_is_init(p_hwfn)) {
}
return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
- delay);
+ usecs);
}
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
- loaded--;
- loaded_port[p_hwfn->port_id]--;
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
- loaded);
- }
- return ECORE_SUCCESS;
- }
-#endif
-
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
return ECORE_SUCCESS;
}
-#ifndef ASIC_ONLY
-static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
- u32 *p_load_code)
-{
- static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
-
- if (!loaded)
- load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
- else if (!loaded_port[p_hwfn->port_id])
- load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
- else
- load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
-
- /* On CMT, always tell that it's engine */
- if (ECORE_IS_CMT(p_hwfn->p_dev))
- load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
-
- *p_load_code = load_phase;
- loaded++;
- loaded_port[p_hwfn->port_id]++;
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
- *p_load_code, loaded, p_hwfn->port_id,
- loaded_port[p_hwfn->port_id]);
-}
-#endif
-
static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
enum ecore_override_force_load override_force_load)
u8 mfw_drv_role = 0, mfw_force_cmd;
enum _ecore_status_t rc;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
- return ECORE_SUCCESS;
- }
-#endif
-
OSAL_MEM_ZERO(&in_params, sizeof(in_params));
in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
in_params.drv_ver_0 = ECORE_VERSION;
u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
u32 path_addr = SECTION_ADDR(mfw_path_offsize,
ECORE_PATH_ID(p_hwfn));
- u32 disabled_vfs[VF_MAX_STATIC / 32];
+ u32 disabled_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
int i;
+ OSAL_MEM_ZERO(disabled_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
+
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Reading Disabled VF information from [offset %08x],"
" path_addr %08x\n",
mfw_path_offsize, path_addr);
- for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) {
disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
path_addr +
OFFSETOF(struct public_path,
struct ecore_ptt *p_ptt,
u32 *vfs_to_ack)
{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_func_offsize,
- MCP_PF_ID(p_hwfn));
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
- int i;
+ u16 i;
- for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
"Acking VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
mb_params.p_data_src = vfs_to_ack;
- mb_params.data_src_size = VF_MAX_STATIC / 8;
+ mb_params.data_src_size = (u8)VF_BITMAP_SIZE_IN_BYTES;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
&mb_params);
if (rc != ECORE_SUCCESS) {
return ECORE_TIMEOUT;
}
- /* TMP - clear the ACK bits; should be done by MFW */
- for (i = 0; i < (VF_MAX_STATIC / 32); i++)
- ecore_wr(p_hwfn, p_ptt,
- func_addr +
- OFFSETOF(struct public_func, drv_ack_vf_disabled) +
- i * sizeof(u32), 0);
-
return rc;
}
u32 cmd;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (b_up)
+ OSAL_LINK_UPDATE(p_hwfn);
return ECORE_SUCCESS;
+ }
#endif
/* Set the shmem configuration according to params */
p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
FUNC_MF_CFG_OV_STAG_MASK;
p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
- if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
+ if (OSAL_GET_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
p_hwfn->hw_info.ovlan);
struct mdump_config_stc mdump_config;
enum _ecore_status_t rc;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+ DP_INFO(p_hwfn, "Emulation: Can't get mdump info\n");
+ return ECORE_NOTIMPL;
+ }
+#endif
+
OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
struct public_func shmem_info;
u32 port_cfg, val;
- if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ if (!OSAL_GET_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
return;
OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
/* update storm FW with negotiation results */
ecore_sp_pf_update_ufp(p_hwfn);
+ /* update stag pcp value */
+ ecore_sp_pf_update_stag(p_hwfn);
+
return ECORE_SUCCESS;
}
u32 global_offsize;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
- return ECORE_SUCCESS;
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+ DP_INFO(p_hwfn, "Emulation: Can't get MFW version\n");
+ return ECORE_NOTIMPL;
}
#endif
return ECORE_SUCCESS;
}
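+/* Read the MBI version words from the nvm_cfg1 global section in the MCP
+ * scratchpad.
+ */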
+int ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 *p_mbi_ver)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+ DP_INFO(p_hwfn, "Emulation: Can't get MBI version\n");
+ return -EOPNOTSUPP;
+ }
+#endif
+
+ if (IS_VF(p_hwfn->p_dev))
+ return -EINVAL;
+
+ /* Read the address of the nvm_cfg */
+ nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read the offset of nvm_cfg1 */
+ nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob,
+ mbi_version);
+ *p_mbi_ver =
+ ecore_rd(p_hwfn, p_ptt,
+ mbi_ver_addr) & (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
+
+ return 0;
+}
+
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_media_type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ *p_media_type = MEDIA_UNSPECIFIED;
/* TODO - Add support for VFs */
if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "Emulation: Can't get media type\n");
+ return ECORE_NOTIMPL;
+ }
+#endif
DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
return ECORE_BUSY;
}
- if (!p_ptt) {
- *p_media_type = MEDIA_UNSPECIFIED;
- rc = ECORE_INVAL;
- } else {
- *p_media_type = ecore_rd(p_hwfn, p_ptt,
- p_hwfn->mcp_info->port_addr +
- OFFSETOF(struct public_port,
- media_type));
- }
+ if (!p_ptt)
+ return ECORE_INVAL;
+
+ *p_media_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, media_type));
return ECORE_SUCCESS;
}
struct ecore_ptt *p_ptt,
u32 *p_speed_mask)
{
- u32 transceiver_type, transceiver_state;
+ u32 transceiver_type = ETH_TRANSCEIVER_TYPE_NONE, transceiver_state;
ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
&transceiver_type);
u32 flash_size;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
- return ECORE_INVAL;
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+ DP_INFO(p_hwfn, "Emulation: Can't get flash size\n");
+ return ECORE_NOTIMPL;
}
#endif
struct ecore_ptt *p_ptt,
u8 vf_id, u8 num)
{
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+ DP_INFO(p_hwfn,
+ "Emulation: Avoid sending the %s mailbox command\n",
+ ECORE_IS_BB(p_hwfn->p_dev) ? "CFG_VF_MSIX" :
+ "CFG_PF_VFS_MSIX");
+ return ECORE_SUCCESS;
+ }
+#endif
+
if (ECORE_IS_BB(p_hwfn->p_dev))
return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
else
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
- u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
+ u32 buf_idx, buf_size, nvm_cmd, nvm_offset;
+ u32 resp = FW_MSG_CODE_ERROR, param;
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
enum _ecore_status_t rc = ECORE_INVAL;
struct ecore_ptt *p_ptt;
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt;
- u32 resp, param;
+ u32 resp = 0, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
u16 gpio, u32 *gpio_val)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- u32 drv_mb_param = 0, rsp;
+ u32 drv_mb_param = 0, rsp = 0;
drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
u16 gpio, u16 gpio_val)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- u32 drv_mb_param = 0, param, rsp;
+ u32 drv_mb_param = 0, param, rsp = 0;
drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
(gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
- u32 drv_mb_param = 0, rsp;
+ u32 drv_mb_param = 0, rsp = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
return rc;
}
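+/* Use the BIST_TEST mailbox command to query the MFW for the number of images
+ * in the NVM.
+ */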
+enum _ecore_status_t
+ecore_mcp_bist_nvm_get_num_images(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 *num_images)
+{
+ u32 drv_mb_param = 0, rsp;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_BIST_TEST_INDEX,
+ DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, num_images);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (rsp == FW_MSG_CODE_UNSUPPORTED)
+ rc = ECORE_NOTIMPL;
+ else if (rsp != FW_MSG_CODE_OK)
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
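+/* Use the BIST_TEST mailbox command to query the MFW for the attributes of
+ * the NVM image at the given index.
+ */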
+enum _ecore_status_t
+ecore_mcp_bist_nvm_get_image_att(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index)
+{
+ u32 buf_size, nvm_offset = 0, resp, param;
+ enum _ecore_status_t rc;
+
+ SET_MFW_FIELD(nvm_offset, DRV_MB_PARAM_BIST_TEST_INDEX,
+ DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX);
+ SET_MFW_FIELD(nvm_offset, DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX,
+ image_index);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ nvm_offset, &resp, ¶m, &buf_size,
+ (u32 *)p_image_att);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (resp == FW_MSG_CODE_UNSUPPORTED)
+ rc = ECORE_NOTIMPL;
+ else if ((resp != FW_MSG_CODE_OK) || (p_image_att->return_code != 1))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
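+/* Populate the per-hwfn cache of NVM image attributes. Returns immediately if
+ * the cache is already valid.
+ */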
+enum _ecore_status_t ecore_mcp_nvm_info_populate(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_nvm_image_info nvm_info;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+ u32 i;
+
+ if (p_hwfn->nvm_info.valid)
+ return ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ||
+ CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+#endif
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return ECORE_BUSY;
+ }
+
+ /* Acquire the number of available images from the MFW */
+ OSAL_MEM_ZERO(&nvm_info, sizeof(nvm_info));
+ rc = ecore_mcp_bist_nvm_get_num_images(p_hwfn, p_ptt,
+ &nvm_info.num_images);
+ if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
+ goto out;
+ } else if ((rc != ECORE_SUCCESS) || (nvm_info.num_images == 0)) {
+ DP_ERR(p_hwfn, "Failed getting number of images\n");
+ goto err0;
+ }
+
+ nvm_info.image_att = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ nvm_info.num_images *
+ sizeof(struct bist_nvm_image_att));
+ if (!nvm_info.image_att) {
+ rc = ECORE_NOMEM;
+ goto err0;
+ }
+
+ /* Iterate over images and get their attributes */
+ for (i = 0; i < nvm_info.num_images; i++) {
+ rc = ecore_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
+ &nvm_info.image_att[i],
+ i);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "Failed getting image index %d attributes\n",
+ i);
+ goto err1;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "image index %d, size %x\n", i,
+ nvm_info.image_att[i].len);
+ }
+out:
+ /* Update hwfn's nvm_info */
+ if (nvm_info.num_images) {
+ p_hwfn->nvm_info.num_images = nvm_info.num_images;
+ if (p_hwfn->nvm_info.image_att)
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->nvm_info.image_att);
+ p_hwfn->nvm_info.image_att = nvm_info.image_att;
+ p_hwfn->nvm_info.valid = true;
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return ECORE_SUCCESS;
+
+err1:
+ OSAL_FREE(p_hwfn->p_dev, nvm_info.image_att);
+err0:
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
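+/* Translate an ecore image id into the MFW image type and look up the image
+ * address and length in the cached NVM info.
+ */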
+enum _ecore_status_t
+ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn,
+ enum ecore_nvm_images image_id,
+ struct ecore_nvm_image_att *p_image_att)
+{
+ enum nvm_image_type type;
+ u32 i;
+
+ /* Translate image_id into MFW definitions */
+ switch (image_id) {
+ case ECORE_NVM_IMAGE_ISCSI_CFG:
+ type = NVM_TYPE_ISCSI_CFG;
+ break;
+ case ECORE_NVM_IMAGE_FCOE_CFG:
+ type = NVM_TYPE_FCOE_CFG;
+ break;
+ case ECORE_NVM_IMAGE_MDUMP:
+ type = NVM_TYPE_MDUMP;
+ break;
+ case ECORE_NVM_IMAGE_NVM_CFG1:
+ type = NVM_TYPE_NVM_CFG1;
+ break;
+ case ECORE_NVM_IMAGE_DEFAULT_CFG:
+ type = NVM_TYPE_DEFAULT_CFG;
+ break;
+ case ECORE_NVM_IMAGE_NVM_META:
+ type = NVM_TYPE_META;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n",
+ image_id);
+ return ECORE_INVAL;
+ }
+
+ ecore_mcp_nvm_info_populate(p_hwfn);
+ for (i = 0; i < p_hwfn->nvm_info.num_images; i++) {
+ if (type == p_hwfn->nvm_info.image_att[i].image_type)
+ break;
+ }
+ if (i == p_hwfn->nvm_info.num_images) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "Failed to find nvram image of type %08x\n",
+ image_id);
+ return ECORE_NOENT;
+ }
+
+ p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
+ p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
+
+ return ECORE_SUCCESS;
+}
+
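+/* Read an entire NVM image into the supplied buffer, after validating the
+ * image and buffer sizes.
+ */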
+enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
+ enum ecore_nvm_images image_id,
+ u8 *p_buffer, u32 buffer_len)
+{
+ struct ecore_nvm_image_att image_att;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(p_buffer, buffer_len);
+
+ rc = ecore_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Validate sizes - both the image's and the supplied buffer's */
+ if (image_att.length <= 4) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "Image [%d] is too small - only %d bytes\n",
+ image_id, image_att.length);
+ return ECORE_INVAL;
+ }
+
+ if (image_att.length > buffer_len) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "Image [%d] is too big - %08x bytes where only %08x are available\n",
+ image_id, image_att.length, buffer_len);
+ return ECORE_NOMEM;
+ }
+
+ return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
+ (u8 *)p_buffer, image_att.length);
+}
+
enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_lock_params *p_params)
{
- u32 param = 0, mcp_resp, mcp_param;
+ u32 param = 0, mcp_resp = 0, mcp_param = 0;
u8 opcode;
enum _ecore_status_t rc;