X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fbase%2Fecore_mcp.c;h=49963c6919c23bda7258e7b8521d444046ccd639;hb=cb71992793940ce5b642d843e07c934a11f3c13b;hp=e6980e696877a0412ae9b77eb1f2e4afbc8bc4a6;hpb=c0845c33ffb1954c1a8c5f6244b7984631cf9706;p=dpdk.git

diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index e6980e6968..49963c6919 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1,14 +1,13 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bcm_osal.h"
 #include "ecore.h"
 #include "ecore_status.h"
+#include "nvm_cfg.h"
 #include "ecore_mcp.h"
 #include "mcp_public.h"
 #include "reg_addr.h"
@@ -21,6 +20,7 @@
 #include "ecore_iro.h"
 #include "ecore_dcbx.h"
 #include "ecore_sp_commands.h"
+#include "ecore_cxt.h"
 
 #define CHIP_MCP_RESP_ITER_US 10
 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
@@ -155,6 +155,9 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
 	if (p_hwfn->mcp_info) {
 		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
 
+		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
+		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
+
 		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
 		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
 					      &p_hwfn->mcp_info->cmd_list, list,
@@ -163,8 +166,6 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
 		}
 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
 
-		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
-		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
 #ifdef CONFIG_ECORE_LOCK_ALLOC
 		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
 		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
@@ -238,11 +239,30 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
 	/* Allocate mcp_info structure */
 	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
-				       sizeof(*p_hwfn->mcp_info));
-	if (!p_hwfn->mcp_info)
-		goto err;
+			sizeof(*p_hwfn->mcp_info));
+	if (!p_hwfn->mcp_info) {
+		DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
+		return ECORE_NOMEM;
+	}
 
 	p_info = p_hwfn->mcp_info;
 
+	/* Initialize the MFW spinlocks */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
+		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+		return ECORE_NOMEM;
+	}
+	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
+		OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
+		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+		return ECORE_NOMEM;
+	}
+#endif
+	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
+	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
+
+	OSAL_LIST_INIT(&p_info->cmd_list);
+
 	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
 		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
 		/* Do not free mcp_info here, since public_base indicate that
@@ -257,20 +277,10 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
 	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
 		goto err;
 
-	/* Initialize the MFW spinlocks */
-#ifdef CONFIG_ECORE_LOCK_ALLOC
-	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
-	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
-#endif
-	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
-	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
-
-	OSAL_LIST_INIT(&p_info->cmd_list);
-
 	return ECORE_SUCCESS;
 
 err:
-	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
+	DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
 	ecore_mcp_free(p_hwfn);
 	return ECORE_NOMEM;
 }
@@ -446,6 +456,24 @@ static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
 		   block_cmd ? "Block" : "Unblock");
 }
 
+void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
+			      struct ecore_ptt *p_ptt)
+{
+	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+
+	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+	DP_NOTICE(p_hwfn, false,
+		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
 static enum _ecore_status_t
 _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 			 struct ecore_mcp_mb_params *p_mb_params,
@@ -476,6 +504,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
 
 		OSAL_UDELAY(delay);
+		OSAL_MFW_CMD_PREEMPT(p_hwfn);
 	} while (++cnt < max_retries);
 
 	if (cnt >= max_retries) {
@@ -517,12 +546,14 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 			goto err;
 
 		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+		OSAL_MFW_CMD_PREEMPT(p_hwfn);
 	} while (++cnt < max_retries);
 
 	if (cnt >= max_retries) {
 		DP_NOTICE(p_hwfn, false,
 			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
 			  p_mb_params->cmd, p_mb_params->param);
+		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
 
 		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
 		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
@@ -570,7 +601,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
 
 	/* MCP not initialized */
 	if (!ecore_mcp_is_init(p_hwfn)) {
-		DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
 		return ECORE_BUSY;
 	}
 
@@ -1067,8 +1098,6 @@ enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
 		return rc;
 	}
 
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
-
 	/* Check if there is a DID mismatch between nvm-cfg/efuse */
 	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
 		DP_NOTICE(p_hwfn, false,
@@ -1196,6 +1225,8 @@ static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
 	else
 		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+
+	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
 }
 
 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
@@ -1221,6 +1252,28 @@ static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
 		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
 }
 
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+				    struct ecore_ptt *p_ptt,
+				    struct public_func *p_data,
+				    int pfid)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_FUNC);
+	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+	u32 i, size;
+
+	OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+	size = OSAL_MIN_T(u32, sizeof(*p_data),
+			  SECTION_SIZE(mfw_path_offsize));
+	for (i = 0; i < size / sizeof(u32); i++)
+		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+					      func_addr + (i << 2));
+
+	return size;
+}
+
 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
 					 struct ecore_ptt *p_ptt,
 					 bool b_reset)
@@ -1250,10 +1303,24 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
 		goto out;
 	}
 
-	if (p_hwfn->b_drv_link_init)
-		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
-	else
+	if (p_hwfn->b_drv_link_init) {
+		/* Link indication with modern MFW arrives as per-PF
+		 * indication.
+		 */
+		if (p_hwfn->mcp_info->capabilities &
+		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+			struct public_func shmem_info;
+
+			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+						 MCP_PF_ID(p_hwfn));
+			p_link->link_up = !!(shmem_info.status &
+					     FUNC_STATUS_VIRTUAL_LINK_UP);
+		} else {
+			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+		}
+	} else {
 		p_link->link_up = false;
+	}
 
 	p_link->full_duplex = true;
 	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1300,7 +1367,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
 	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
 					   p_link, max_bw);
 
-	/* Mintz bandwidth configuration */
+	/* Min bandwidth configuration */
 	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
 					   p_link, min_bw);
 	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
@@ -1361,7 +1428,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
 	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
 		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
 
-	OSAL_LINK_UPDATE(p_hwfn, p_ptt);
+	OSAL_LINK_UPDATE(p_hwfn);
 out:
 	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
 }
@@ -1516,7 +1583,8 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
 		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
 		break;
 	default:
-		DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+			   "Invalid protocol type %d\n", type);
 		return;
 	}
 
@@ -1566,28 +1634,6 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
 	}
 }
 
-static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
-				    struct ecore_ptt *p_ptt,
-				    struct public_func *p_data,
-				    int pfid)
-{
-	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-					PUBLIC_FUNC);
-	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
-	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
-	u32 i, size;
-
-	OSAL_MEM_ZERO(p_data, sizeof(*p_data));
-
-	size = OSAL_MIN_T(u32, sizeof(*p_data),
-			  SECTION_SIZE(mfw_path_offsize));
-	for (i = 0; i < size / sizeof(u32); i++)
-		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
-					      func_addr + (i << 2));
-
-	return size;
-}
-
 static void
 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 {
@@ -1610,6 +1656,49 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 		      &param);
 }
 
+static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
+				  struct ecore_ptt *p_ptt)
+{
+	struct public_func shmem_info;
+	u32 resp = 0, param = 0;
+
+	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+				 MCP_PF_ID(p_hwfn));
+
+	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+					    FUNC_MF_CFG_OV_STAG_MASK;
+	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
+	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
+		if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
+			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+				 p_hwfn->hw_info.ovlan);
+			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+			/* Configure DB to add external vlan to EDPM packets */
+			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+				 p_hwfn->hw_info.ovlan);
+		} else {
+			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+
+			/* Configure DB to add external vlan to EDPM packets */
+			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+		}
+
+		ecore_sp_pf_update_stag(p_hwfn);
+	}
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
+
+	/* Acknowledge the MFW */
+	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+		      &resp, &param);
+}
+
 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
 {
 	/* A single notification should be sent to upper driver in CMT mode */
@@ -1860,6 +1949,74 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
 }
 
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+	struct public_func shmem_info;
+	u32 port_cfg, val;
+
+	if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+		return;
+
+	OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
+	port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+			    OFFSETOF(struct public_port, oem_cfg_port));
+	val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
+	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
+		DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
+			  val);
+
+	val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
+	if (val == OEM_CFG_SCHED_TYPE_ETS)
+		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
+	else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
+		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
+	else
+		DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
+			  val);
+
+	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+				 MCP_PF_ID(p_hwfn));
+	val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
+	p_hwfn->ufp_info.tc = (u8)val;
+	val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
+			    OEM_CFG_FUNC_HOST_PRI_CTRL);
+	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
+		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
+	else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
+		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
+	else
+		DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
+			  val);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+		   "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
+		   p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+		   p_hwfn->ufp_info.pri_type);
+}
+
+static enum _ecore_status_t
+ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+	ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
+
+	if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
+		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
+		p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+
+		ecore_qm_reconf(p_hwfn, p_ptt);
+	} else {
+		/* Merge UFP TC with the dcbx TC data */
+		ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+					    ECORE_DCBX_OPERATIONAL_MIB);
+	}
+
+	/* update storm FW with negotiation results */
+	ecore_sp_pf_update_ufp(p_hwfn);
+
+	return ECORE_SUCCESS;
+}
+
 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
 					     struct ecore_ptt *p_ptt)
 {
@@ -1902,6 +2059,15 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
 		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
 						    ECORE_DCBX_OPERATIONAL_MIB);
+			/* clear the user-config cache */
+			OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
+				    sizeof(struct ecore_dcbx_set));
+			break;
+		case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
+			ecore_lldp_mib_update_event(p_hwfn, p_ptt);
+			break;
+		case MFW_DRV_MSG_OEM_CFG_UPDATE:
+			ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
 			break;
 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
 			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
@@ -1918,6 +2084,9 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
 		case MFW_DRV_MSG_BW_UPDATE:
 			ecore_mcp_update_bw(p_hwfn, p_ptt);
 			break;
+		case MFW_DRV_MSG_S_TAG_UPDATE:
+			ecore_mcp_update_stag(p_hwfn, p_ptt);
+			break;
 		case MFW_DRV_MSG_FAILURE_DETECTED:
 			ecore_mcp_handle_fan_failure(p_hwfn);
 			break;
@@ -2006,19 +2175,20 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
 					   struct ecore_ptt *p_ptt,
 					   u32 *p_media_type)
 {
+	enum _ecore_status_t rc = ECORE_SUCCESS;
 
 	/* TODO - Add support for VFs */
 	if (IS_VF(p_hwfn->p_dev))
 		return ECORE_INVAL;
 
 	if (!ecore_mcp_is_init(p_hwfn)) {
-		DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
 		return ECORE_BUSY;
 	}
 
 	if (!p_ptt) {
 		*p_media_type = MEDIA_UNSPECIFIED;
-		return ECORE_INVAL;
+		rc = ECORE_INVAL;
 	} else {
 		*p_media_type = ecore_rd(p_hwfn, p_ptt,
 					 p_hwfn->mcp_info->port_addr +
@@ -2029,6 +2199,197 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
 	return ECORE_SUCCESS;
 }
 
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+						    struct ecore_ptt *p_ptt,
+						    u32 *p_tranceiver_type)
+{
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+
+	/* TODO - Add support for VFs */
+	if (IS_VF(p_hwfn->p_dev))
+		return ECORE_INVAL;
+
+	if (!ecore_mcp_is_init(p_hwfn)) {
+		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+		return ECORE_BUSY;
+	}
+	if (!p_ptt) {
+		*p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+		rc = ECORE_INVAL;
+	} else {
+		*p_tranceiver_type = ecore_rd(p_hwfn, p_ptt,
+				p_hwfn->mcp_info->port_addr +
+				offsetof(struct public_port,
+					 transceiver_data));
+	}
+
+	return rc;
+}
+
+static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
+{
+	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+		return 1;
+
+	return 0;
+}
+
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+						struct ecore_ptt *p_ptt,
+						u32 *p_speed_mask)
+{
+	u32 transceiver_data, transceiver_type, transceiver_state;
+
+	ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
+
+	transceiver_state = GET_MFW_FIELD(transceiver_data,
+					  ETH_TRANSCEIVER_STATE);
+
+	transceiver_type = GET_MFW_FIELD(transceiver_data,
+					 ETH_TRANSCEIVER_TYPE);
+
+	if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
+		return ECORE_INVAL;
+
+	switch (transceiver_type) {
+	case ETH_TRANSCEIVER_TYPE_1G_LX:
+	case ETH_TRANSCEIVER_TYPE_1G_SX:
+	case ETH_TRANSCEIVER_TYPE_1G_PCC:
+	case ETH_TRANSCEIVER_TYPE_1G_ACC:
+	case ETH_TRANSCEIVER_TYPE_1000BASET:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_10G_SR:
+	case ETH_TRANSCEIVER_TYPE_10G_LR:
+	case ETH_TRANSCEIVER_TYPE_10G_LRM:
+	case ETH_TRANSCEIVER_TYPE_10G_ER:
+	case ETH_TRANSCEIVER_TYPE_10G_PCC:
+	case ETH_TRANSCEIVER_TYPE_10G_ACC:
+	case ETH_TRANSCEIVER_TYPE_4x10G:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_40G_LR4:
+	case ETH_TRANSCEIVER_TYPE_40G_SR4:
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_100G_AOC:
+	case ETH_TRANSCEIVER_TYPE_100G_SR4:
+	case ETH_TRANSCEIVER_TYPE_100G_LR4:
+	case ETH_TRANSCEIVER_TYPE_100G_ER4:
+	case ETH_TRANSCEIVER_TYPE_100G_ACC:
+		*p_speed_mask =
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_25G_SR:
+	case ETH_TRANSCEIVER_TYPE_25G_LR:
+	case ETH_TRANSCEIVER_TYPE_25G_AOC:
+	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_40G_CR4:
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_100G_CR4:
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+		*p_speed_mask =
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+		*p_speed_mask =
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_XLPPI:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_10G_BASET:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+		break;
+
+	default:
+		DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n",
+			transceiver_type);
+		*p_speed_mask = 0xff;
+		break;
+	}
+
+	return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+						struct ecore_ptt *p_ptt,
+						u32 *p_board_config)
+{
+	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+
+	/* TODO - Add support for VFs */
+	if (IS_VF(p_hwfn->p_dev))
+		return ECORE_INVAL;
+
+	if (!ecore_mcp_is_init(p_hwfn)) {
+		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+		return ECORE_BUSY;
+	}
+	if (!p_ptt) {
+		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+		rc = ECORE_INVAL;
+	} else {
+		nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
+					MISC_REG_GEN_PURP_CR0);
+		nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
+					   nvm_cfg_addr + 4);
+		port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+		*p_board_config = ecore_rd(p_hwfn, p_ptt,
+					   port_cfg_addr +
+					   offsetof(struct nvm_cfg1_port,
+						    board_cfg));
+	}
+
+	return rc;
+}
+
 /* @DPDK */
 /* Old MFW has a global configuration for all PFs regarding RDMA support */
 static void
@@ -2441,9 +2802,9 @@ ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
 				   struct ecore_ptt *p_ptt,
 				   enum ecore_ov_client client)
 {
-	enum _ecore_status_t rc;
 	u32 resp = 0, param = 0;
 	u32 drv_mb_param;
+	enum _ecore_status_t rc;
 
 	switch (client) {
 	case ECORE_OV_CLIENT_DRV:
@@ -2473,9 +2834,9 @@ ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
 				 struct ecore_ptt *p_ptt,
 				 enum ecore_ov_driver_state drv_state)
 {
-	enum _ecore_status_t rc;
 	u32 resp = 0, param = 0;
 	u32 drv_mb_param;
+	enum _ecore_status_t rc;
 
 	switch (drv_state) {
 	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
@@ -2847,11 +3208,16 @@ enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
 					 DRV_MSG_CODE_TRANSCEIVER_READ,
 					 nvm_offset, &resp, &param, &buf_size,
 					 (u32 *)(p_buf + offset));
-		if ((resp & FW_MSG_CODE_MASK) ==
-		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+		if (rc != ECORE_SUCCESS) {
+			DP_NOTICE(p_hwfn, false,
+				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+				  rc);
+			return rc;
+		}
+
+		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
 			return ECORE_NODEV;
-		} else if ((resp & FW_MSG_CODE_MASK) !=
-			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
 			return ECORE_UNKNOWN_ERROR;
 
 		offset += buf_size;
@@ -2885,11 +3251,16 @@ enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
 					 DRV_MSG_CODE_TRANSCEIVER_WRITE,
 					 nvm_offset, &resp, &param, buf_size,
 					 (u32 *)&p_buf[buf_idx]);
-		if ((resp & FW_MSG_CODE_MASK) ==
-		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+		if (rc != ECORE_SUCCESS) {
+			DP_NOTICE(p_hwfn, false,
+				  "Failed to send a transceiver write command to the MFW. rc = %d.\n",
+				  rc);
+			return rc;
+		}
+
+		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
 			return ECORE_NODEV;
-		} else if ((resp & FW_MSG_CODE_MASK) !=
-			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
 			return ECORE_UNKNOWN_ERROR;
 
 		buf_idx += buf_size;
@@ -3597,8 +3968,108 @@ enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
 	u32 mcp_resp, mcp_param, features;
 
 	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
-		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
+		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
 
 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
 			     features, &mcp_resp, &mcp_param);
 }
+
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			struct ecore_mcp_drv_attr *p_drv_attr)
+{
+	struct attribute_cmd_write_stc attr_cmd_write;
+	enum _attribute_commands_e mfw_attr_cmd;
+	struct ecore_mcp_mb_params mb_params;
+	enum _ecore_status_t rc;
+
+	switch (p_drv_attr->attr_cmd) {
+	case ECORE_MCP_DRV_ATTR_CMD_READ:
+		mfw_attr_cmd = ATTRIBUTE_CMD_READ;
+		break;
+	case ECORE_MCP_DRV_ATTR_CMD_WRITE:
+		mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
+		break;
+	case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
+		mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
+		break;
+	case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
+		mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
+		break;
+	default:
+		DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
+			  p_drv_attr->attr_cmd);
+		return ECORE_INVAL;
+	}
+
+	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
+	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
+		      p_drv_attr->attr_num);
+	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
+		      mfw_attr_cmd);
+	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
+		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
+		attr_cmd_write.val = p_drv_attr->val;
+		attr_cmd_write.mask = p_drv_attr->mask;
+		attr_cmd_write.offset = p_drv_attr->offset;
+
+		mb_params.p_data_src = &attr_cmd_write;
+		mb_params.data_src_size = sizeof(attr_cmd_write);
+	}
+
+	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+		DP_INFO(p_hwfn,
+			"The attribute command is not supported by the MFW\n");
+		return ECORE_NOTIMPL;
+	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
+		DP_INFO(p_hwfn,
+			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
+			mb_params.mcp_resp, p_drv_attr->attr_cmd,
+			p_drv_attr->attr_num);
+		return ECORE_INVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
+		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
+		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
+		   mb_params.mcp_param);
+
+	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
+	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
+		p_drv_attr->val = mb_params.mcp_param;
+
+	return ECORE_SUCCESS;
+}
+
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+		      u32 offset, u32 val)
+{
+	struct ecore_mcp_mb_params mb_params = {0};
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	u32 dword = val;
+
+	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
+	mb_params.param = offset;
+	mb_params.p_data_src = &dword;
+	mb_params.data_src_size = sizeof(dword);
+
+	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(p_hwfn, false,
+			  "Failed to wol write request, rc = %d\n", rc);
+	}
+
+	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
+		DP_NOTICE(p_hwfn, false,
+			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
+			  val, offset, mb_params.mcp_resp);
+		rc = ECORE_UNKNOWN_ERROR;
+	}
+}