X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fbase%2Fecore_mcp.c;h=6c65606888e1dcf1db8dab84d2cd5f3a4869d177;hb=f9204d8a23c3312e2939170e6d70c7b5ef7e42d9;hp=db44aa35d9deeb526c8c9866531f11df5285711a;hpb=22c996968bf721082734709d0bab4316e5538aaf;p=dpdk.git diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c index db44aa35d9..6c65606888 100644 --- a/drivers/net/qede/base/ecore_mcp.c +++ b/drivers/net/qede/base/ecore_mcp.c @@ -1,14 +1,13 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" #include "ecore.h" #include "ecore_status.h" +#include "nvm_cfg.h" #include "ecore_mcp.h" #include "mcp_public.h" #include "reg_addr.h" @@ -20,6 +19,8 @@ #include "ecore_gtt_reg_addr.h" #include "ecore_iro.h" #include "ecore_dcbx.h" +#include "ecore_sp_commands.h" +#include "ecore_cxt.h" #define CHIP_MCP_RESP_ITER_US 10 #define EMUL_MCP_RESP_ITER_US (1000 * 1000) @@ -43,9 +44,9 @@ OFFSETOF(struct public_drv_mb, _field)) #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ - DRV_ID_PDA_COMP_VER_SHIFT) + DRV_ID_PDA_COMP_VER_OFFSET) -#define MCP_BYTES_PER_MBIT_SHIFT 17 +#define MCP_BYTES_PER_MBIT_OFFSET 17 #ifndef ASIC_ONLY static int loaded; @@ -154,6 +155,9 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn) if (p_hwfn->mcp_info) { struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp; + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow); + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp, &p_hwfn->mcp_info->cmd_list, list, @@ -162,8 +166,6 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn) } OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); - OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur); - OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow); #ifdef CONFIG_ECORE_LOCK_ALLOC OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock); OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock); @@ -175,10 +177,16 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn) return ECORE_SUCCESS; } +/* Maximum of 1 sec to wait for the SHMEM ready indication */ +#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES 20 +#define ECORE_MCP_SHMEM_RDY_ITER_MS 50 + static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mcp_info *p_info = p_hwfn->mcp_info; + u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES; + u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS; u32 drv_mb_offsize, mfw_mb_offsize; u32 mcp_pf_id = MCP_PF_ID(p_hwfn); @@ -196,6 +204,35 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn, p_info->public_base |= GRCBASE_MCP; + /* Get the MFW MB address and number of supported messages */ + mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_MFW_MB)); + p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); + p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr); + + /* @@@TBD: + * The driver can notify that there was an MCP reset, and read the SHMEM + * values before the MFW has completed initializing them. + * As a temporary solution, the "sup_msgs" field is used as a data ready + * indication. 
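 *
 * A minimal standalone sketch of this bounded poll (the helper name
 * wait_shmem_ready is hypothetical; the hunk above inlines the same
 * logic in ecore_load_mcp_offsets()):
 *
 *   static bool wait_shmem_ready(struct ecore_hwfn *p_hwfn,
 *                                struct ecore_ptt *p_ptt, u32 addr)
 *   {
 *           u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
 *           u16 len = (u16)ecore_rd(p_hwfn, p_ptt, addr);
 *
 *           while (!len && cnt--) {
 *                   OSAL_MSLEEP(ECORE_MCP_SHMEM_RDY_ITER_MS);
 *                   len = (u16)ecore_rd(p_hwfn, p_ptt, addr);
 *           }
 *
 *           return len != 0;
 *   }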
+ * This should be replaced with an actual indication when it is provided + * by the MFW. + */ + while (!p_info->mfw_mb_length && cnt--) { + OSAL_MSLEEP(msec); + p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr); + } + + if (!cnt) { + DP_NOTICE(p_hwfn, false, + "Failed to get the SHMEM ready notification after %d msec\n", + ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec); + return ECORE_TIMEOUT; + } + /* Calculate the driver and MFW mailbox address */ drv_mb_offsize = ecore_rd(p_hwfn, p_ptt, SECTION_OFFSIZE_ADDR(p_info->public_base, @@ -206,14 +243,6 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn, " mcp_pf_id = 0x%x\n", drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); - /* Set the MFW MB address */ - mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, - SECTION_OFFSIZE_ADDR(p_info->public_base, - PUBLIC_MFW_MB)); - p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); - p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt, - p_info->mfw_mb_addr); - /* Get the current driver mailbox sequence before sending * the first command */ @@ -237,11 +266,30 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, /* Allocate mcp_info structure */ p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, - sizeof(*p_hwfn->mcp_info)); - if (!p_hwfn->mcp_info) - goto err; + sizeof(*p_hwfn->mcp_info)); + if (!p_hwfn->mcp_info) { + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n"); + return ECORE_NOMEM; + } p_info = p_hwfn->mcp_info; + /* Initialize the MFW spinlocks */ +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) { + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) { + OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } +#endif + OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock); + OSAL_SPIN_LOCK_INIT(&p_info->link_lock); + + OSAL_LIST_INIT(&p_info->cmd_list); + if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, false, "MCP is not initialized\n"); /* Do not free mcp_info here, since public_base indicate that @@ -256,20 +304,10 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) goto err; - /* Initialize the MFW spinlocks */ -#ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock); - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock); -#endif - OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock); - OSAL_SPIN_LOCK_INIT(&p_info->link_lock); - - OSAL_LIST_INIT(&p_info->cmd_list); - return ECORE_SUCCESS; err: - DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n"); ecore_mcp_free(p_hwfn); return ECORE_NOMEM; } @@ -303,6 +341,12 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn, delay = EMUL_MCP_RESP_ITER_US; #endif + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, false, + "The MFW is not responsive. 
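
/* The b_block_cmd gate above, together with ecore_mcp_print_cpu_info(),
 * gives a cheap liveness probe: the program counter is sampled three
 * times with CHIP_MCP_RESP_ITER_US delays in between. A follow-up check
 * a caller could add (hypothetical, not part of this change):
 */
if (cpu_pc_0 == cpu_pc_1 && cpu_pc_1 == cpu_pc_2)
	DP_NOTICE(p_hwfn, false,
		  "MCP program counter is not advancing - CPU looks stuck\n");
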
Avoid sending MCP_RESET mailbox command.\n"); + return ECORE_ABORTED; + } + /* Ensure that only a single thread is accessing the mailbox */ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); @@ -430,6 +474,33 @@ static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, (p_mb_params->cmd | seq_num), p_mb_params->param); } +static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn, + bool block_cmd) +{ + p_hwfn->mcp_info->b_block_cmd = block_cmd; + + DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", + block_cmd ? "Block" : "Unblock"); +} + +void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; + + cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + OSAL_UDELAY(CHIP_MCP_RESP_ITER_US); + cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + OSAL_UDELAY(CHIP_MCP_RESP_ITER_US); + cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + + DP_NOTICE(p_hwfn, false, + "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", + cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); +} + static enum _ecore_status_t _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_mb_params *p_mb_params, @@ -460,6 +531,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); OSAL_UDELAY(delay); + OSAL_MFW_CMD_PREEMPT(p_hwfn); } while (++cnt < max_retries); if (cnt >= max_retries) { @@ -501,17 +573,20 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, goto err; OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MFW_CMD_PREEMPT(p_hwfn); } while (++cnt < max_retries); if (cnt >= max_retries) { DP_NOTICE(p_hwfn, false, "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); + ecore_mcp_print_cpu_info(p_hwfn, p_ptt); OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + ecore_mcp_cmd_set_blocking(p_hwfn, true); ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL); return ECORE_AGAIN; } @@ -553,7 +628,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, /* MCP not initialized */ if (!ecore_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n"); return ECORE_BUSY; } @@ -566,6 +641,13 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, return ECORE_INVAL; } + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, false, + "The MFW is not responsive. 
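
/* The ECORE_MFW_*_FIELD -> GET/SET_MFW_FIELD and *_SHIFT -> *_OFFSET
 * conversions throughout this patch assume the usual mask/offset pair
 * per field. A sketch of what the accessors expand to under that
 * convention (the authoritative definitions live in the ecore headers):
 */
#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _OFFSET))
#define SET_MFW_FIELD(name, field, value)			\
do {								\
	(name) &= ~(field ## _MASK);				\
	(name) |= (((value) << (field ## _OFFSET)) &		\
		   (field ## _MASK));				\
} while (0)
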
Avoid sending mailbox command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + return ECORE_ABORTED; + } + return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, delay); } @@ -676,7 +758,7 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn, load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION; /* On CMT, always tell that it's engine */ - if (p_hwfn->p_dev->num_hwfns > 1) + if (ECORE_IS_CMT(p_hwfn->p_dev)) load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE; *p_load_code = load_phase; @@ -804,18 +886,16 @@ __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, load_req.drv_ver_0 = p_in_params->drv_ver_0; load_req.drv_ver_1 = p_in_params->drv_ver_1; load_req.fw_ver = p_in_params->fw_ver; - ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, - p_in_params->drv_role); - ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO, - p_in_params->timeout_val); - ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE, - p_in_params->force_cmd); - ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0, - p_in_params->avoid_eng_reset); + SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role); + SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO, + p_in_params->timeout_val); + SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd); + SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0, + p_in_params->avoid_eng_reset); hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ? DRV_ID_MCP_HSI_VER_CURRENT : - (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT); + (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET); OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; @@ -828,22 +908,20 @@ __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", mb_params.param, - ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW), - ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE), - ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER), - ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER)); + GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW), + GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE), + GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER), + GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER)); if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1) DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n", load_req.drv_ver_0, load_req.drv_ver_1, load_req.fw_ver, load_req.misc0, - ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE), - ECORE_MFW_GET_FIELD(load_req.misc0, - LOAD_REQ_LOCK_TO), - ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE), - ECORE_MFW_GET_FIELD(load_req.misc0, - LOAD_REQ_FLAGS0)); + GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE), + GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO), + GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE), + GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0)); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) { @@ -862,28 +940,26 @@ __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n", load_rsp.drv_ver_0, load_rsp.drv_ver_1, load_rsp.fw_ver, load_rsp.misc0, - ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE), - ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI), - 
ECORE_MFW_GET_FIELD(load_rsp.misc0, - LOAD_RSP_FLAGS0)); + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE), + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI), + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0)); p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0; p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1; p_out_params->exist_fw_ver = load_rsp.fw_ver; p_out_params->exist_drv_role = - ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE); + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE); p_out_params->mfw_hsi_ver = - ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI); + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI); p_out_params->drv_exists = - ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) & + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) & LOAD_RSP_FLAGS0_DRV_EXISTS; } return ECORE_SUCCESS; } -static void ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn, - enum ecore_drv_role drv_role, +static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role, u8 *p_mfw_drv_role) { switch (drv_role) { @@ -902,8 +978,7 @@ enum ecore_load_req_force { ECORE_LOAD_REQ_FORCE_ALL, }; -static void ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn, - enum ecore_load_req_force force_cmd, +static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd, u8 *p_mfw_force_cmd) { switch (force_cmd) { @@ -940,11 +1015,10 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, in_params.drv_ver_0 = ECORE_VERSION; in_params.drv_ver_1 = ecore_get_config_bitmap(); in_params.fw_ver = STORM_FW_VERSION; - ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role); + ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role); in_params.drv_role = mfw_drv_role; in_params.timeout_val = p_params->timeout_val; - ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE, - &mfw_force_cmd); + ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd); in_params.force_cmd = mfw_force_cmd; in_params.avoid_eng_reset = p_params->avoid_eng_reset; @@ -981,8 +1055,7 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, out_params.exist_drv_ver_0, out_params.exist_drv_ver_1); - ecore_get_mfw_force_cmd(p_hwfn, - ECORE_LOAD_REQ_FORCE_ALL, + ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL, &mfw_force_cmd); in_params.force_cmd = mfw_force_cmd; @@ -1052,8 +1125,6 @@ enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn, return rc; } -#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) - /* Check if there is a DID mismatch between nvm-cfg/efuse */ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) DP_NOTICE(p_hwfn, false, @@ -1174,12 +1245,15 @@ static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn, OFFSETOF(struct public_port, transceiver_data))); - transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE); + transceiver_state = GET_MFW_FIELD(transceiver_state, + ETH_TRANSCEIVER_STATE); if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) DP_NOTICE(p_hwfn, false, "Transceiver is present.\n"); else DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n"); + + OSAL_TRANSCEIVER_UPDATE(p_hwfn); } static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn, @@ -1193,18 +1267,40 @@ static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn, eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, eee_status)); p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT); - val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_SHIFT; + val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> 
EEE_LD_ADV_STATUS_OFFSET; if (val & EEE_1G_ADV) p_link->eee_adv_caps |= ECORE_EEE_1G_ADV; if (val & EEE_10G_ADV) p_link->eee_adv_caps |= ECORE_EEE_10G_ADV; - val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_SHIFT; + val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET; if (val & EEE_1G_ADV) p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV; if (val & EEE_10G_ADV) p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV; } +static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct public_func *p_data, + int pfid) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + u32 i, size; + + OSAL_MEM_ZERO(p_data, sizeof(*p_data)); + + size = OSAL_MIN_T(u32, sizeof(*p_data), + SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + + return size; +} + static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, bool b_reset) @@ -1234,10 +1330,24 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn, goto out; } - if (p_hwfn->b_drv_link_init) - p_link->link_up = !!(status & LINK_STATUS_LINK_UP); - else + if (p_hwfn->b_drv_link_init) { + /* Link indication with modern MFW arrives as per-PF + * indication. + */ + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { + struct public_func shmem_info; + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + p_link->link_up = !!(shmem_info.status & + FUNC_STATUS_VIRTUAL_LINK_UP); + } else { + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + } + } else { p_link->link_up = false; + } p_link->full_duplex = true; switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { @@ -1284,7 +1394,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn, __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); - /* Mintz bandwidth configuration */ + /* Min bandwidth configuration */ __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt, @@ -1345,7 +1455,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn, if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link); - OSAL_LINK_UPDATE(p_hwfn, p_ptt); + OSAL_LINK_UPDATE(p_hwfn); out: OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock); } @@ -1391,7 +1501,7 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn, if (params->eee.adv_caps & ECORE_EEE_10G_ADV) phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G; phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer << - EEE_TX_TIMER_USEC_SHIFT) & + EEE_TX_TIMER_USEC_OFFSET) & EEE_TX_TIMER_USEC_MASK; } @@ -1425,7 +1535,7 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn, */ ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up); - return rc; + return ECORE_SUCCESS; } u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn, @@ -1500,7 +1610,8 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn, hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; break; default: - DP_INFO(p_hwfn, "Invalid protocol type %d\n", type); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Invalid protocol type %d\n", type); return; } @@ -1531,7 +1642,7 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn, */ p_info->bandwidth_min 
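
/* ecore_mcp_get_shmem_func() (moved earlier in the file by this patch)
 * copies the PF's public_func block out of SHMEM one dword at a time.
 * Usage sketch (hypothetical caller) matching the virtual-link check in
 * the link-change path above:
 */
struct public_func shmem_info;
bool vlink_up;

ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
vlink_up = !!(shmem_info.status & FUNC_STATUS_VIRTUAL_LINK_UP);
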
= (p_shmem_info->config & FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT; + FUNC_MF_CFG_MIN_BW_OFFSET; if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { DP_INFO(p_hwfn, "bandwidth minimum out of bounds [%02x]. Set to 1\n", @@ -1541,7 +1652,7 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn, p_info->bandwidth_max = (p_shmem_info->config & FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; + FUNC_MF_CFG_MAX_BW_OFFSET; if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { DP_INFO(p_hwfn, "bandwidth maximum out of bounds [%02x]. Set to 100\n", @@ -1550,28 +1661,6 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn, } } -static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt, - struct public_func *p_data, - int pfid) -{ - u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, - PUBLIC_FUNC); - u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr); - u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); - u32 i, size; - - OSAL_MEM_ZERO(p_data, sizeof(*p_data)); - - size = OSAL_MIN_T(u32, sizeof(*p_data), - SECTION_SIZE(mfw_path_offsize)); - for (i = 0; i < size / sizeof(u32); i++) - ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt, - func_addr + (i << 2)); - - return size; -} - static void ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { @@ -1594,8 +1683,50 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) ¶m); } -static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt) +static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 resp = 0, param = 0; + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + + p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & + FUNC_MF_CFG_OV_STAG_MASK; + p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; + if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) { + if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) { + ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, + p_hwfn->hw_info.ovlan); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1); + + /* Configure DB to add external vlan to EDPM packets */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, + p_hwfn->hw_info.ovlan); + } else { + ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0); + + /* Configure DB to add external vlan to EDPM packets */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0); + } + + ecore_sp_pf_update_stag(p_hwfn); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", + p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); + OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN); + + /* Acknowledge the MFW */ + ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, + &resp, ¶m); +} + +static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn) { /* A single notification should be sent to upper driver in CMT mode */ if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) @@ -1845,6 +1976,74 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn, ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); } +void +ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 port_cfg, val; + + if 
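
/* Both bandwidth fields read above are clamped to the valid percentage
 * range; an equivalent (hypothetical) helper for that rule:
 */
static u8 ecore_bw_clamp(u8 bw, u8 dflt)
{
	/* Out-of-range values fall back to the default (1 for the
	 * minimum, 100 for the maximum in the code above).
	 */
	return (bw >= 1 && bw <= 100) ? bw : dflt;
}
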
(!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) + return; + + OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); + port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, oem_cfg_port)); + val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE); + if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) + DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n", + val); + + val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE); + if (val == OEM_CFG_SCHED_TYPE_ETS) + p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS; + else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) + p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW; + else + DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n", + val); + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC); + p_hwfn->ufp_info.tc = (u8)val; + val = GET_MFW_FIELD(shmem_info.oem_cfg_func, + OEM_CFG_FUNC_HOST_PRI_CTRL); + if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) + p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC; + else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) + p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS; + else + DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n", + val); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "UFP shmem config: mode = %d tc = %d pri_type = %d\n", + p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, + p_hwfn->ufp_info.pri_type); +} + +static enum _ecore_status_t +ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + ecore_mcp_read_ufp_config(p_hwfn, p_ptt); + + if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) { + p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; + p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; + + ecore_qm_reconf(p_hwfn, p_ptt); + } else { + /* Merge UFP TC with the dcbx TC data */ + ecore_dcbx_mib_update_event(p_hwfn, p_ptt, + ECORE_DCBX_OPERATIONAL_MIB); + } + + /* update storm FW with negotiation results */ + ecore_sp_pf_update_ufp(p_hwfn); + + return ECORE_SUCCESS; +} + enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { @@ -1887,6 +2086,15 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: ecore_dcbx_mib_update_event(p_hwfn, p_ptt, ECORE_DCBX_OPERATIONAL_MIB); + /* clear the user-config cache */ + OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0, + sizeof(struct ecore_dcbx_set)); + break; + case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED: + ecore_lldp_mib_update_event(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_OEM_CFG_UPDATE: + ecore_mcp_handle_ufp_event(p_hwfn, p_ptt); break; case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt); @@ -1903,8 +2111,11 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, case MFW_DRV_MSG_BW_UPDATE: ecore_mcp_update_bw(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_S_TAG_UPDATE: + ecore_mcp_update_stag(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_FAILURE_DETECTED: - ecore_mcp_handle_fan_failure(p_hwfn, p_ptt); + ecore_mcp_handle_fan_failure(p_hwfn); break; case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: ecore_mcp_handle_critical_error(p_hwfn, p_ptt); @@ -1991,19 +2202,20 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_media_type) { + enum _ecore_status_t rc = ECORE_SUCCESS; /* TODO - Add support for VFs */ if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; if (!ecore_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, true, "MFW 
is not initialized !\n"); + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); return ECORE_BUSY; } if (!p_ptt) { *p_media_type = MEDIA_UNSPECIFIED; - return ECORE_INVAL; + rc = ECORE_INVAL; } else { *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + @@ -2014,6 +2226,204 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; } +enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_transceiver_state, + u32 *p_transceiver_type) +{ + u32 transceiver_info; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + + *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE; + *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING; + + transceiver_info = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); + + *p_transceiver_state = GET_MFW_FIELD(transceiver_info, + ETH_TRANSCEIVER_STATE); + + if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) { + *p_transceiver_type = GET_MFW_FIELD(transceiver_info, + ETH_TRANSCEIVER_TYPE); + } else { + *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN; + } + + return rc; +} + +static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) +{ + if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && + ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && + (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) + return 1; + + return 0; +} + +enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_speed_mask) +{ + u32 transceiver_type, transceiver_state; + + ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, + &transceiver_type); + + + if (is_transceiver_ready(transceiver_state, transceiver_type) == 0) + return ECORE_INVAL; + + switch (transceiver_type) { + case ETH_TRANSCEIVER_TYPE_1G_LX: + case ETH_TRANSCEIVER_TYPE_1G_SX: + case ETH_TRANSCEIVER_TYPE_1G_PCC: + case ETH_TRANSCEIVER_TYPE_1G_ACC: + case ETH_TRANSCEIVER_TYPE_1000BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_10G_SR: + case ETH_TRANSCEIVER_TYPE_10G_LR: + case ETH_TRANSCEIVER_TYPE_10G_LRM: + case ETH_TRANSCEIVER_TYPE_10G_ER: + case ETH_TRANSCEIVER_TYPE_10G_PCC: + case ETH_TRANSCEIVER_TYPE_10G_ACC: + case ETH_TRANSCEIVER_TYPE_4x10G: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_LR4: + case ETH_TRANSCEIVER_TYPE_40G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_AOC: + case ETH_TRANSCEIVER_TYPE_100G_SR4: + case ETH_TRANSCEIVER_TYPE_100G_LR4: + case ETH_TRANSCEIVER_TYPE_100G_ER4: + case ETH_TRANSCEIVER_TYPE_100G_ACC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_SR: + case ETH_TRANSCEIVER_TYPE_25G_LR: + case ETH_TRANSCEIVER_TYPE_25G_AOC: + case ETH_TRANSCEIVER_TYPE_25G_ACC_S: + case ETH_TRANSCEIVER_TYPE_25G_ACC_M: + case ETH_TRANSCEIVER_TYPE_25G_ACC_L: + *p_speed_mask = 
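
/* Usage sketch for the two helpers above (hypothetical caller): fetch
 * the module state/type and derive an advertised-speed mask, falling
 * back to "no restriction" while the transceiver is absent or still
 * updating. The 0xffffffff fallback is illustrative, not part of this
 * change.
 */
u32 speed_mask;

if (ecore_mcp_trans_speed_mask(p_hwfn, p_ptt, &speed_mask) !=
    ECORE_SUCCESS)
	speed_mask = 0xffffffff;
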
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_CA_N: + case ETH_TRANSCEIVER_TYPE_25G_CA_S: + case ETH_TRANSCEIVER_TYPE_25G_CA_L: + case ETH_TRANSCEIVER_TYPE_4x25G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_XLPPI: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + break; + + case ETH_TRANSCEIVER_TYPE_10G_BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + default: + DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n", + transceiver_type); + *p_speed_mask = 0xff; + break; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_board_config) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + if (!p_ptt) { + *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; + rc = ECORE_INVAL; + } else { + nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, + MISC_REG_GEN_PURP_CR0); + nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, + nvm_cfg_addr + 4); + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + *p_board_config = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + board_cfg)); + } + + return rc; +} + /* @DPDK */ /* Old MFW has a global configuration for all PFs regarding RDMA support */ static void @@ -2181,42 +2591,6 @@ const struct ecore_mcp_function_info return &p_hwfn->mcp_info->func_info; } -enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt, - struct ecore_mcp_nvm_params *params) -{ - enum _ecore_status_t rc; - - switch (params->type) { - case ECORE_MCP_NVM_RD: - rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd, - params->nvm_common.offset, - ¶ms->nvm_common.resp, - ¶ms->nvm_common.param, - params->nvm_rd.buf_size, - params->nvm_rd.buf); - break; - case ECORE_MCP_CMD: - rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd, - params->nvm_common.offset, - 
¶ms->nvm_common.resp, - ¶ms->nvm_common.param); - break; - case ECORE_MCP_NVM_WR: - rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd, - params->nvm_common.offset, - ¶ms->nvm_common.resp, - ¶ms->nvm_common.param, - params->nvm_wr.buf_size, - params->nvm_wr.buf); - break; - default: - rc = ECORE_NOTIMPL; - break; - } - return rc; -} - int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 personalities) { @@ -2262,8 +2636,8 @@ enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn, flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> - MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; - flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); + MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; + flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET)); *p_flash_size = flash_size; @@ -2288,9 +2662,10 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; } -enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt, - u8 vf_id, u8 num) +static enum _ecore_status_t +ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 vf_id, u8 num) { u32 resp = 0, param = 0, rc_param = 0; enum _ecore_status_t rc; @@ -2301,9 +2676,9 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; num *= p_hwfn->p_dev->num_hwfns; - param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) & + param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) & DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; - param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) & + param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) & DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, @@ -2322,6 +2697,39 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, return rc; } +static enum _ecore_status_t +ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 num) +{ + u32 resp = 0, param = num, rc_param = 0; + enum _ecore_status_t rc; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, + param, &resp, &rc_param); + + if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { + DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n"); + rc = ECORE_INVAL; + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Requested 0x%02x MSI-x interrupts for VFs\n", + num); + } + + return rc; +} + +enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 vf_id, u8 num) +{ + if (ECORE_IS_BB(p_hwfn->p_dev)) + return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); + else + return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); +} + enum _ecore_status_t ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_drv_version *p_ver) @@ -2359,33 +2767,68 @@ ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, return rc; } +/* A maximal 100 msec waiting time for the MCP to halt */ +#define ECORE_MCP_HALT_SLEEP_MS 10 +#define ECORE_MCP_HALT_MAX_RETRIES 10 + enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { + u32 resp = 0, param = 0, cpu_state, cnt = 0; enum _ecore_status_t rc; - u32 resp = 0, param = 0; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, ¶m); - if (rc != ECORE_SUCCESS) + if (rc != ECORE_SUCCESS) { 
DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } - return rc; + do { + OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS); + cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) + break; + } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES); + + if (cnt == ECORE_MCP_HALT_MAX_RETRIES) { + DP_NOTICE(p_hwfn, false, + "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", + ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); + return ECORE_BUSY; + } + + ecore_mcp_cmd_set_blocking(p_hwfn, true); + + return ECORE_SUCCESS; } +#define ECORE_MCP_RESUME_SLEEP_MS 10 + enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { - u32 value, cpu_mode; + u32 cpu_mode, cpu_state; ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); - value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); - value &= ~MCP_REG_CPU_MODE_SOFT_HALT; - ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; + ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); + + OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS); + cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + + if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { + DP_NOTICE(p_hwfn, false, + "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", + cpu_mode, cpu_state); + return ECORE_BUSY; + } - return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0; + ecore_mcp_cmd_set_blocking(p_hwfn, false); + + return ECORE_SUCCESS; } enum _ecore_status_t @@ -2393,9 +2836,9 @@ ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_ov_client client) { - enum _ecore_status_t rc; u32 resp = 0, param = 0; u32 drv_mb_param; + enum _ecore_status_t rc; switch (client) { case ECORE_OV_CLIENT_DRV: @@ -2425,9 +2868,9 @@ ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_ov_driver_state drv_state) { - enum _ecore_status_t rc; u32 resp = 0, param = 0; u32 drv_mb_param; + enum _ecore_status_t rc; switch (drv_state) { case ECORE_OV_DRIVER_STATE_NOT_LOADED: @@ -2460,10 +2903,72 @@ ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, } enum _ecore_status_t -ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt, u16 mtu) +ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u16 mtu) { - return 0; + u32 resp = 0, param = 0, drv_mb_param = 0; + enum _ecore_status_t rc; + + SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu); + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU, + drv_mb_param, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc); + + return rc; +} + +enum _ecore_status_t +ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 *mac) +{ + struct ecore_mcp_mb_params mb_params; + union drv_union_data union_data; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_SET_VMAC; + SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE, + DRV_MSG_CODE_VMAC_TYPE_MAC); + mb_params.param |= MCP_PF_ID(p_hwfn); + OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN); + mb_params.p_data_src = &union_data; + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); + + return rc; +} + +enum _ecore_status_t 
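
/* Typical (hypothetical) pairing of the reworked helpers above:
 * ecore_mcp_halt() now polls MCP_REG_CPU_STATE until
 * MCP_REG_CPU_STATE_SOFT_HALTED is set and blocks further mailbox
 * commands; ecore_mcp_resume() verifies the bit is cleared and
 * unblocks them.
 */
rc = ecore_mcp_halt(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS)
	return rc;

/* ... access resources owned by the MCP ... */

rc = ecore_mcp_resume(p_hwfn, p_ptt);
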
+ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_ov_eswitch eswitch) +{ + enum _ecore_status_t rc; + u32 resp = 0, param = 0; + u32 drv_mb_param; + + switch (eswitch) { + case ECORE_OV_ESWITCH_NONE: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE; + break; + case ECORE_OV_ESWITCH_VEB: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB; + break; + case ECORE_OV_ESWITCH_VEPA: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA; + break; + default: + DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch); + return ECORE_INVAL; + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE, + drv_mb_param, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc); + + return rc; } enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn, @@ -2523,7 +3028,7 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); u32 bytes_left, offset, bytes_to_copy, buf_size; - struct ecore_mcp_nvm_params params; + u32 nvm_offset, resp, param; struct ecore_ptt *p_ptt; enum _ecore_status_t rc = ECORE_SUCCESS; @@ -2531,22 +3036,29 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, if (!p_ptt) return ECORE_BUSY; - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); bytes_left = len; offset = 0; - params.type = ECORE_MCP_NVM_RD; - params.nvm_rd.buf_size = &buf_size; - params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM; while (bytes_left > 0) { bytes_to_copy = OSAL_MIN_T(u32, bytes_left, MCP_DRV_NVM_BUF_LEN); - params.nvm_common.offset = (addr + offset) | - (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT); - params.nvm_rd.buf = (u32 *)(p_buf + offset); - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); - if (rc != ECORE_SUCCESS || (params.nvm_common.resp != - FW_MSG_CODE_NVM_OK)) { - DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); + nvm_offset = (addr + offset) | (bytes_to_copy << + DRV_MB_PARAM_NVM_LEN_OFFSET); + rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NVM_READ_NVRAM, + nvm_offset, &resp, ¶m, &buf_size, + (u32 *)(p_buf + offset)); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_dev, false, + "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n", + rc); + resp = FW_MSG_CODE_ERROR; + break; + } + + if (resp != FW_MSG_CODE_NVM_OK) { + DP_NOTICE(p_dev, false, + "nvm read failed, resp = 0x%08x\n", resp); + rc = ECORE_UNKNOWN_ERROR; break; } @@ -2554,43 +3066,40 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, * isn't preemptible. Sleep a bit to prevent CPU hogging. 
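	 *
	 * Usage sketch for the chunked reader above (hypothetical buffer
	 * and address; the helper internally splits the transfer into
	 * MCP_DRV_NVM_BUF_LEN pieces and sleeps periodically to stay
	 * preemptible):
	 *
	 *   u8 buf[1024];
	 *
	 *   rc = ecore_mcp_nvm_read(p_dev, addr, buf, sizeof(buf));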
*/ if (bytes_left % 0x1000 < - (bytes_left - *params.nvm_rd.buf_size) % 0x1000) + (bytes_left - buf_size) % 0x1000) OSAL_MSLEEP(1); - offset += *params.nvm_rd.buf_size; - bytes_left -= *params.nvm_rd.buf_size; + offset += buf_size; + bytes_left -= buf_size; } - p_dev->mcp_nvm_resp = params.nvm_common.resp; + p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; } enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, - u32 addr, u8 *p_buf, u32 len) + u32 addr, u8 *p_buf, u32 *p_len) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); - struct ecore_mcp_nvm_params params; struct ecore_ptt *p_ptt; + u32 resp = 0, param; enum _ecore_status_t rc; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); - params.type = ECORE_MCP_NVM_RD; - params.nvm_rd.buf_size = &len; - params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ? - DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ; - params.nvm_common.offset = addr; - params.nvm_rd.buf = (u32 *)p_buf; - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + (cmd == ECORE_PHY_CORE_READ) ? + DRV_MSG_CODE_PHY_CORE_READ : + DRV_MSG_CODE_PHY_RAW_READ, + addr, &resp, ¶m, p_len, (u32 *)p_buf); if (rc != ECORE_SUCCESS) DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); - p_dev->mcp_nvm_resp = params.nvm_common.resp; + p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; @@ -2599,14 +3108,12 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); - struct ecore_mcp_nvm_params params; struct ecore_ptt *p_ptt; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp)); ecore_ptt_release(p_hwfn, p_ptt); @@ -2616,19 +3123,16 @@ enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf) enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); - struct ecore_mcp_nvm_params params; struct ecore_ptt *p_ptt; + u32 resp = 0, param; enum _ecore_status_t rc; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); - params.type = ECORE_MCP_CMD; - params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE; - params.nvm_common.offset = addr; - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); - p_dev->mcp_nvm_resp = params.nvm_common.resp; + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr, + &resp, ¶m); + p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; @@ -2638,19 +3142,16 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, u32 addr) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); - struct ecore_mcp_nvm_params params; struct ecore_ptt *p_ptt; + u32 resp = 0, param; enum _ecore_status_t rc; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); - params.type = ECORE_MCP_CMD; - params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN; - params.nvm_common.offset = addr; - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); - p_dev->mcp_nvm_resp = params.nvm_common.resp; + rc = ecore_mcp_cmd(p_hwfn, p_ptt, 
DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, + &resp, ¶m); + p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; @@ -2662,37 +3163,58 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, u32 addr, u8 *p_buf, u32 len) { + u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param; struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); enum _ecore_status_t rc = ECORE_INVAL; - struct ecore_mcp_nvm_params params; struct ecore_ptt *p_ptt; - u32 buf_idx, buf_size; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); - params.type = ECORE_MCP_NVM_WR; - if (cmd == ECORE_PUT_FILE_DATA) - params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; - else - params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; + switch (cmd) { + case ECORE_PUT_FILE_DATA: + nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; + break; + case ECORE_NVM_WRITE_NVRAM: + nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; + break; + case ECORE_EXT_PHY_FW_UPGRADE: + nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE; + break; + default: + DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n", + cmd); + rc = ECORE_INVAL; + goto out; + } + buf_idx = 0; while (buf_idx < len) { buf_size = OSAL_MIN_T(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); - params.nvm_common.offset = ((buf_size << - DRV_MB_PARAM_NVM_LEN_SHIFT) - | addr) + buf_idx; - params.nvm_wr.buf_size = buf_size; - params.nvm_wr.buf = (u32 *)&p_buf[buf_idx]; - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); - if (rc != ECORE_SUCCESS || - ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) && - (params.nvm_common.resp != - FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK))) - DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); + nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | + addr) + + buf_idx; + rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, + &resp, ¶m, buf_size, + (u32 *)&p_buf[buf_idx]); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_dev, false, + "ecore_mcp_nvm_write() failed, rc = %d\n", + rc); + resp = FW_MSG_CODE_ERROR; + break; + } + + if (resp != FW_MSG_CODE_OK && + resp != FW_MSG_CODE_NVM_OK && + resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { + DP_NOTICE(p_dev, false, + "nvm write failed, resp = 0x%08x\n", resp); + rc = ECORE_UNKNOWN_ERROR; + break; + } /* This can be a lengthy process, and it's possible scheduler * isn't preemptible. Sleep a bit to prevent CPU hogging. @@ -2704,7 +3226,8 @@ enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, buf_idx += buf_size; } - p_dev->mcp_nvm_resp = params.nvm_common.resp; + p_dev->mcp_nvm_resp = resp; +out: ecore_ptt_release(p_hwfn, p_ptt); return rc; @@ -2714,7 +3237,7 @@ enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, u32 addr, u8 *p_buf, u32 len) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); - struct ecore_mcp_nvm_params params; + u32 resp = 0, param, nvm_cmd; struct ecore_ptt *p_ptt; enum _ecore_status_t rc; @@ -2722,17 +3245,13 @@ enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, if (!p_ptt) return ECORE_BUSY; - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); - params.type = ECORE_MCP_NVM_WR; - params.nvm_wr.buf_size = len; - params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ? 
- DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE; - params.nvm_common.offset = addr; - params.nvm_wr.buf = (u32 *)p_buf; - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE : + DRV_MSG_CODE_PHY_RAW_WRITE; + rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr, + &resp, ¶m, len, (u32 *)p_buf); if (rc != ECORE_SUCCESS) DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); - p_dev->mcp_nvm_resp = params.nvm_common.resp; + p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; @@ -2742,20 +3261,17 @@ enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev, u32 addr) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); - struct ecore_mcp_nvm_params params; struct ecore_ptt *p_ptt; + u32 resp, param; enum _ecore_status_t rc; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); - params.type = ECORE_MCP_CMD; - params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE; - params.nvm_common.offset = addr; - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); - p_dev->mcp_nvm_resp = params.nvm_common.resp; + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr, + &resp, ¶m); + p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; @@ -2766,42 +3282,42 @@ enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) { - struct ecore_mcp_nvm_params params; + u32 bytes_left, bytes_to_copy, buf_size, nvm_offset; + u32 resp, param; enum _ecore_status_t rc; - u32 bytes_left, bytes_to_copy, buf_size; - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); - params.nvm_common.offset = - (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) | - (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT); + nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | + (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); addr = offset; offset = 0; bytes_left = len; - params.type = ECORE_MCP_NVM_RD; - params.nvm_rd.buf_size = &buf_size; - params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ; while (bytes_left > 0) { bytes_to_copy = OSAL_MIN_T(u32, bytes_left, MAX_I2C_TRANSACTION_SIZE); - params.nvm_rd.buf = (u32 *)(p_buf + offset); - params.nvm_common.offset &= - (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | - DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); - params.nvm_common.offset |= - ((addr + offset) << - DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT); - params.nvm_common.offset |= - (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT); - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); - if ((params.nvm_common.resp & FW_MSG_CODE_MASK) == - FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) { + nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); + nvm_offset |= ((addr + offset) << + DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); + nvm_offset |= (bytes_to_copy << + DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); + rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_TRANSCEIVER_READ, + nvm_offset, &resp, ¶m, &buf_size, + (u32 *)(p_buf + offset)); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to send a transceiver read command to the MFW. 
rc = %d.\n", + rc); + return rc; + } + + if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) return ECORE_NODEV; - } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) != - FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) return ECORE_UNKNOWN_ERROR; - offset += *params.nvm_rd.buf_size; - bytes_left -= *params.nvm_rd.buf_size; + offset += buf_size; + bytes_left -= buf_size; } return ECORE_SUCCESS; @@ -2812,36 +3328,35 @@ enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) { - struct ecore_mcp_nvm_params params; + u32 buf_idx, buf_size, nvm_offset, resp, param; enum _ecore_status_t rc; - u32 buf_idx, buf_size; - - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); - params.nvm_common.offset = - (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) | - (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT); - params.type = ECORE_MCP_NVM_WR; - params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE; + + nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | + (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); buf_idx = 0; while (buf_idx < len) { buf_size = OSAL_MIN_T(u32, (len - buf_idx), MAX_I2C_TRANSACTION_SIZE); - params.nvm_common.offset &= - (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | - DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); - params.nvm_common.offset |= - ((offset + buf_idx) << - DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT); - params.nvm_common.offset |= - (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT); - params.nvm_wr.buf_size = buf_size; - params.nvm_wr.buf = (u32 *)&p_buf[buf_idx]; - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); - if ((params.nvm_common.resp & FW_MSG_CODE_MASK) == - FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) { + nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); + nvm_offset |= ((offset + buf_idx) << + DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); + nvm_offset |= (buf_size << + DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); + rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_TRANSCEIVER_WRITE, + nvm_offset, &resp, ¶m, buf_size, + (u32 *)&p_buf[buf_idx]); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to send a transceiver write command to the MFW. 
rc = %d.\n", + rc); + return rc; + } + + if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) return ECORE_NODEV; - } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) != - FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) return ECORE_UNKNOWN_ERROR; buf_idx += buf_size; @@ -2857,7 +3372,7 @@ enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, enum _ecore_status_t rc = ECORE_SUCCESS; u32 drv_mb_param = 0, rsp; - drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT); + drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ, drv_mb_param, &rsp, gpio_val); @@ -2878,8 +3393,8 @@ enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, enum _ecore_status_t rc = ECORE_SUCCESS; u32 drv_mb_param = 0, param, rsp; - drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) | - (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT); + drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) | + (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE, drv_mb_param, &rsp, ¶m); @@ -2901,7 +3416,7 @@ enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, u32 drv_mb_param = 0, rsp, val = 0; enum _ecore_status_t rc = ECORE_SUCCESS; - drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT; + drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO, drv_mb_param, &rsp, &val); @@ -2909,9 +3424,9 @@ enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, return rc; *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >> - DRV_MB_PARAM_GPIO_DIRECTION_SHIFT; + DRV_MB_PARAM_GPIO_DIRECTION_OFFSET; *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >> - DRV_MB_PARAM_GPIO_CTRL_SHIFT; + DRV_MB_PARAM_GPIO_CTRL_OFFSET; if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) return ECORE_UNKNOWN_ERROR; @@ -2926,7 +3441,7 @@ enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn, enum _ecore_status_t rc = ECORE_SUCCESS; drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << - DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, drv_mb_param, &rsp, ¶m); @@ -2948,7 +3463,7 @@ enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn, enum _ecore_status_t rc = ECORE_SUCCESS; drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << - DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, drv_mb_param, &rsp, ¶m); @@ -2970,7 +3485,7 @@ enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images( enum _ecore_status_t rc = ECORE_SUCCESS; drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << - DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, drv_mb_param, &rsp, num_images); @@ -2988,26 +3503,20 @@ enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att( struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct bist_nvm_image_att *p_image_att, u32 image_index) { - struct ecore_mcp_nvm_params params; + u32 buf_size, nvm_offset, resp, param; enum _ecore_status_t rc; - u32 buf_size; - - OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); - params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << - DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); - params.nvm_common.offset |= (image_index << - 
@@ -2988,26 +3503,20 @@ enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 	struct bist_nvm_image_att *p_image_att, u32 image_index)
 {
-	struct ecore_mcp_nvm_params params;
+	u32 buf_size, nvm_offset, resp, param;
 	enum _ecore_status_t rc;
-	u32 buf_size;
-
-	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
-	params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
-				    DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
-	params.nvm_common.offset |= (image_index <<
-				     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
-	params.type = ECORE_MCP_NVM_RD;
-	params.nvm_rd.buf_size = &buf_size;
-	params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
-	params.nvm_rd.buf = (u32 *)p_image_att;
-
-	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+	nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+		      DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
+	nvm_offset |= (image_index <<
+		       DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
+	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+				  nvm_offset, &resp, &param, &buf_size,
+				  (u32 *)p_image_att);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
-	if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
 	    (p_image_att->return_code != 1))
 		rc = ECORE_UNKNOWN_ERROR;
 
@@ -3041,13 +3550,13 @@ ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
 		val = mfw_temp_info.sensor[i];
 		p_temp_sensor = &p_temp_info->sensors[i];
 		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
-						  SENSOR_LOCATION_SHIFT;
+						  SENSOR_LOCATION_OFFSET;
 		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
-						 THRESHOLD_HIGH_SHIFT;
+						 THRESHOLD_HIGH_OFFSET;
 		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
-					   CRITICAL_TEMPERATURE_SHIFT;
+					   CRITICAL_TEMPERATURE_OFFSET;
 		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
-					       CURRENT_TEMP_SHIFT;
+					       CURRENT_TEMP_OFFSET;
 	}
 
 	return ECORE_SUCCESS;
@@ -3058,23 +3567,17 @@ enum _ecore_status_t ecore_mcp_get_mba_versions(
 	struct ecore_ptt *p_ptt,
 	struct ecore_mba_vers *p_mba_vers)
 {
-	struct ecore_mcp_nvm_params params;
+	u32 buf_size, resp, param;
 	enum _ecore_status_t rc;
-	u32 buf_size;
 
-	OSAL_MEM_ZERO(&params, sizeof(params));
-	params.type = ECORE_MCP_NVM_RD;
-	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
-	params.nvm_common.offset = 0;
-	params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
-	params.nvm_rd.buf_size = &buf_size;
-	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
+				  0, &resp, &param, &buf_size,
+				  &p_mba_vers->mba_vers[0]);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
-	if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
-	    FW_MSG_CODE_NVM_OK)
+	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
 		rc = ECORE_UNKNOWN_ERROR;
 
 	if (buf_size != MCP_DRV_NVM_BUF_LEN)
@@ -3150,9 +3653,9 @@ ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
 #define ECORE_RESC_ALLOC_VERSION				\
 	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
-	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |	\
+	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
-	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
 
 struct ecore_resc_alloc_in_params {
 	u32 cmd;
@@ -3236,10 +3739,10 @@ ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
 		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
 		   p_in_params->cmd, p_in_params->res_id,
 		   ecore_hw_get_resc_name(p_in_params->res_id),
-		   ECORE_MFW_GET_FIELD(mb_params.param,
-				       DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
-		   ECORE_MFW_GET_FIELD(mb_params.param,
-				       DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+		   GET_MFW_FIELD(mb_params.param,
+				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+		   GET_MFW_FIELD(mb_params.param,
+				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
 		   p_in_params->resc_max_val);
 
 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
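All of these conversions rely on each HSI field being described by a *_MASK/*_OFFSET macro pair, which is what motivates the _SHIFT to _OFFSET rename. The renamed accessors are presumably defined along the following lines in ecore.h (a sketch, not the authoritative definitions):

/* Sketch of the accessors this patch switches to; the real definitions
 * live in ecore.h. Token pasting resolves <field>_MASK and <field>_OFFSET
 * from the HSI headers, so a field is fully described by that macro pair.
 */
#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _OFFSET))

#define SET_MFW_FIELD(name, field, value)				\
do {									\
	(name) &= ~(field ## _MASK);					\
	(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));	\
} while (0)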
@@ -3256,10 +3759,10 @@ ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
-		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
-				       FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
-		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
-				       FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+		   GET_MFW_FIELD(p_out_params->mcp_param,
+				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+		   GET_MFW_FIELD(p_out_params->mcp_param,
+				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
 		   p_out_params->resc_num, p_out_params->resc_start,
 		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
 		   p_out_params->flags);
@@ -3347,7 +3850,7 @@ static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
 	}
 
 	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
-		u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+		u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
 
 		DP_NOTICE(p_hwfn, false,
 			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
@@ -3380,9 +3883,9 @@ __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 		break;
 	}
 
-	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
-	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
-	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
@@ -3395,9 +3898,8 @@ __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 		return rc;
 
 	/* Analyze the response */
-	p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
-					      RESOURCE_CMD_RSP_OWNER);
-	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
@@ -3452,6 +3954,36 @@ ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 	return ECORE_SUCCESS;
 }
 
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+				      struct ecore_resc_unlock_params *p_unlock,
+				      enum ecore_resc_lock resource,
+				      bool b_is_permanent)
+{
+	if (p_lock != OSAL_NULL) {
+		OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
+
+		/* Permanent resources don't require aging, and there's no
+		 * point in trying to acquire them more than once, since it's
+		 * not expected that another entity would release them.
+		 */
+		if (b_is_permanent) {
+			p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
+		} else {
+			p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+			p_lock->retry_interval =
+				ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+			p_lock->sleep_b4_retry = true;
+		}
+
+		p_lock->resource = resource;
+	}
+
+	if (p_unlock != OSAL_NULL) {
+		OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
+		p_unlock->resource = resource;
+	}
+}
+
 enum _ecore_status_t
 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 		      struct ecore_resc_unlock_params *p_params)
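The new ecore_mcp_resc_lock_default_init() is meant to feed ecore_mcp_resc_lock()/ecore_mcp_resc_unlock() directly. A sketch of the intended call pattern; ECORE_RESC_LOCK_EXAMPLE is a placeholder for a real enum ecore_resc_lock value, and the b_granted result field is assumed from the lock-params structure:

/* Illustrative sketch - not part of this patch. ECORE_RESC_LOCK_EXAMPLE
 * stands in for a real resource id from enum ecore_resc_lock.
 */
static enum _ecore_status_t
example_with_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_resc_lock_params lock_params;
	struct ecore_resc_unlock_params unlock_params;
	enum _ecore_status_t rc;

	ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
					 ECORE_RESC_LOCK_EXAMPLE,
					 false /* not permanent */);

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc != ECORE_SUCCESS)
		return rc;
	if (!lock_params.b_granted)
		return ECORE_BUSY;

	/* ... critical section shared with other PFs ... */

	return ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}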
@@ -3462,8 +3994,8 @@ ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 	opcode = p_params->b_force ?
		 RESOURCE_OPCODE_FORCE_RELEASE : RESOURCE_OPCODE_RELEASE;
 
-	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
-	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
@@ -3476,7 +4008,7 @@ ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 		return rc;
 
 	/* Analyze the response */
-	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
@@ -3532,8 +4064,178 @@ enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
 	u32 mcp_resp, mcp_param, features;
 
 	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
-		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
+		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
 
 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
 			     features, &mcp_resp, &mcp_param);
 }
+
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			struct ecore_mcp_drv_attr *p_drv_attr)
+{
+	struct attribute_cmd_write_stc attr_cmd_write;
+	enum _attribute_commands_e mfw_attr_cmd;
+	struct ecore_mcp_mb_params mb_params;
+	enum _ecore_status_t rc;
+
+	switch (p_drv_attr->attr_cmd) {
+	case ECORE_MCP_DRV_ATTR_CMD_READ:
+		mfw_attr_cmd = ATTRIBUTE_CMD_READ;
+		break;
+	case ECORE_MCP_DRV_ATTR_CMD_WRITE:
+		mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
+		break;
+	case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
+		mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
+		break;
+	case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
+		mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
+		break;
+	default:
+		DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
+			  p_drv_attr->attr_cmd);
+		return ECORE_INVAL;
+	}
+
+	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
+	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
+		      p_drv_attr->attr_num);
+	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
+		      mfw_attr_cmd);
+	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
+		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
+		attr_cmd_write.val = p_drv_attr->val;
+		attr_cmd_write.mask = p_drv_attr->mask;
+		attr_cmd_write.offset = p_drv_attr->offset;
+
+		mb_params.p_data_src = &attr_cmd_write;
+		mb_params.data_src_size = sizeof(attr_cmd_write);
+	}
+
+	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+		DP_INFO(p_hwfn,
+			"The attribute command is not supported by the MFW\n");
+		return ECORE_NOTIMPL;
+	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
+		DP_INFO(p_hwfn,
+			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
+			mb_params.mcp_resp, p_drv_attr->attr_cmd,
+			p_drv_attr->attr_num);
+		return ECORE_INVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
+		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
+		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
+		   mb_params.mcp_param);
+
+	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
+	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
+		p_drv_attr->val = mb_params.mcp_param;
+
+	return ECORE_SUCCESS;
+}
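A hedged usage sketch for the new attribute interface above; the attribute key is a placeholder, since the valid attribute numbers are MFW-defined:

/* Illustrative sketch - not part of this patch. attr_num 0 is a
 * placeholder key; demonstrates a read-modify-write of a single bit
 * through the MFW attribute command.
 */
static enum _ecore_status_t
example_attr_set_bit0(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_drv_attr drv_attr;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
	drv_attr.attr_num = 0;

	rc = ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Write back only bit 0; the mask keeps the other bits intact */
	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_WRITE;
	drv_attr.val = 0x1;
	drv_attr.mask = 0x1;
	drv_attr.offset = 0;

	return ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr);
}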
+
+enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
+						 struct ecore_ptt *p_ptt)
+{
+	struct ecore_dev *p_dev = p_hwfn->p_dev;
+	struct ecore_mcp_mb_params mb_params;
+	u8 fir_valid, l2_valid;
+	enum _ecore_status_t rc;
+
+	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+		DP_INFO(p_hwfn,
+			"The get_engine_config command is unsupported by the MFW\n");
+		return ECORE_NOTIMPL;
+	}
+
+	fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
+				  FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
+	if (fir_valid)
+		p_dev->fir_affin =
+			GET_MFW_FIELD(mb_params.mcp_param,
+				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
+
+	l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
+				 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
+	if (l2_valid)
+		p_dev->l2_affin_hint =
+			GET_MFW_FIELD(mb_params.mcp_param,
+				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
+
+	DP_INFO(p_hwfn,
+		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
+		fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);
+
+	return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
+						struct ecore_ptt *p_ptt)
+{
+	struct ecore_dev *p_dev = p_hwfn->p_dev;
+	struct ecore_mcp_mb_params mb_params;
+	enum _ecore_status_t rc;
+
+	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
+	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+		DP_INFO(p_hwfn,
+			"The get_ppfid_bitmap command is unsupported by the MFW\n");
+		return ECORE_NOTIMPL;
+	}
+
+	p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
+					    FW_MB_PARAM_PPFID_BITMAP);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
+		   p_dev->ppfid_bitmap);
+
+	return ECORE_SUCCESS;
+}
+
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+		      u32 offset, u32 val)
+{
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	u32 dword = val;
+	struct ecore_mcp_mb_params mb_params;
+
+	OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
+	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
+	mb_params.param = offset;
+	mb_params.p_data_src = &dword;
+	mb_params.data_src_size = sizeof(dword);
+
+	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(p_hwfn, false,
+			  "Failed to send WoL write request, rc = %d\n", rc);
+		return;
+	}
+
+	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
+		DP_NOTICE(p_hwfn, false,
+			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
+			  val, offset, mb_params.mcp_resp);
+		rc = ECORE_UNKNOWN_ERROR;
+	}
+}
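The two discovery commands above are best-effort on older firmware, so callers would presumably treat ECORE_NOTIMPL as non-fatal, along these lines (a hypothetical init-path snippet, not part of this patch):

/* Illustrative sketch - not part of this patch. Shows how an init path
 * might consume the new discovery commands while staying compatible with
 * an MFW that predates them.
 */
static enum _ecore_status_t
example_engine_discovery(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
		return rc;

	rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
		return rc;

	return ECORE_SUCCESS;
}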