-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
+#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
+#include "ecore_sp_commands.h"
+#include "ecore_cxt.h"
#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
OFFSETOF(struct public_drv_mb, _field))
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
- DRV_ID_PDA_COMP_VER_SHIFT)
+ DRV_ID_PDA_COMP_VER_OFFSET)
-#define MCP_BYTES_PER_MBIT_SHIFT 17
+#define MCP_BYTES_PER_MBIT_OFFSET 17
#ifndef ASIC_ONLY
static int loaded;
}
}
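+/* A pending MFW mailbox command. Each command posted to the MFW is tracked
+ * as a list element so that its response, matched by sequence number, can be
+ * returned to the issuing context once the MFW completes it.
+ */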
+struct ecore_mcp_cmd_elem {
+ osal_list_entry_t list;
+ struct ecore_mcp_mb_params *p_mb_params;
+ u16 expected_seq_num;
+ bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 expected_seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+ sizeof(*p_cmd_elem));
+ if (!p_cmd_elem) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
+ goto out;
+ }
+
+ p_cmd_elem->p_mb_params = p_mb_params;
+ p_cmd_elem->expected_seq_num = expected_seq_num;
+ OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+ return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_cmd_elem *p_cmd_elem)
+{
+ OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+ OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ if (p_cmd_elem->expected_seq_num == seq_num)
+ return p_cmd_elem;
+ }
+
+ return OSAL_NULL;
+}
+
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
+
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
- OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
+ &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ }
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
+#endif
}
+
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
return ECORE_SUCCESS;
p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
DRV_PULSE_SEQ_MASK;
- p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
- MISCS_REG_GENERIC_POR_0);
+ p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
return ECORE_SUCCESS;
}
/* Allocate mcp_info structure */
p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(*p_hwfn->mcp_info));
- if (!p_hwfn->mcp_info)
- goto err;
+ sizeof(*p_hwfn->mcp_info));
+ if (!p_hwfn->mcp_info) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
+ return ECORE_NOMEM;
+ }
p_info = p_hwfn->mcp_info;
+ /* Initialize the MFW spinlocks */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+ return ECORE_NOMEM;
+ }
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
+ OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+ return ECORE_NOMEM;
+ }
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
+ OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
+
+ OSAL_LIST_INIT(&p_info->cmd_list);
+
if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
/* Do not free mcp_info here, since public_base indicate that
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW spinlock */
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
- OSAL_SPIN_LOCK_INIT(&p_info->lock);
-
return ECORE_SUCCESS;
err:
- DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
ecore_mcp_free(p_hwfn);
return ECORE_NOMEM;
}
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
- u32 cmd)
-{
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
-
- /* The spinlock shouldn't be acquired when the mailbox command is
- * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
- * pending [UN]LOAD_REQ command of another PF together with a spinlock
- * (i.e. interrupts are disabled) - can lead to a deadlock.
- * It is assumed that for a single PF, no other mailbox commands can be
- * sent from another context while sending LOAD_REQ, and that any
- * parallel commands to UNLOAD_REQ can be cancelled.
- */
- if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
- p_hwfn->mcp_info->block_mb_sending = false;
+static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- if (p_hwfn->mcp_info->block_mb_sending) {
- DP_NOTICE(p_hwfn, false,
- "Trying to send a MFW mailbox command [0x%x]"
- " in parallel to [UN]LOAD_REQ. Aborting.\n",
- cmd);
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
- return ECORE_BUSY;
- }
+ /* Use MCP history register to check if MCP reset occurred between init
+ * time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+ p_hwfn->mcp_info->mcp_hist, generic_por_0);
- if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
- p_hwfn->mcp_info->block_mb_sending = true;
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ ecore_load_mcp_offsets(p_hwfn, p_ptt);
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
}
-
- return ECORE_SUCCESS;
-}
-
-static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
-{
- if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
- u32 delay = CHIP_MCP_RESP_ITER_US;
- u32 org_mcp_reset_seq, cnt = 0;
+ u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
#ifndef ASIC_ONLY
delay = EMUL_MCP_RESP_ITER_US;
#endif
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
- if (rc != ECORE_SUCCESS)
- return rc;
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+ return ECORE_ABORTED;
+ }
+
+ /* Ensure that only a single thread is accessing the mailbox */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Set drv command along with the updated sequence */
org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ /* Set drv command along with the updated sequence */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
do {
rc = ECORE_AGAIN;
}
- ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
-static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd, u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
- u32 delay = CHIP_MCP_RESP_ITER_US;
- u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
- u32 seq, cnt = 1, actual_mb_seq;
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ /* There is at most one pending command at a certain time, and if it
+ * exists - it is placed at the HEAD of the list.
+ */
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
+ p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
+ struct ecore_mcp_cmd_elem,
+ list);
+ return !p_cmd_elem->b_is_completed;
+ }
+
+ return false;
+}
+
+/* Must be called while cmd_lock is acquired */
+static enum _ecore_status_t
+ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params *p_mb_params;
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 mcp_resp;
+ u16 seq_num;
+
+ mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+ /* Return if no new non-handled response has been received */
+ if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+ return ECORE_AGAIN;
+
+ p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
+ if (!p_cmd_elem) {
+ DP_ERR(p_hwfn,
+ "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+ seq_num);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ p_mb_params = p_cmd_elem->p_mb_params;
+
+ /* Get the MFW response along with the sequence number */
+ p_mb_params->mcp_resp = mcp_resp;
+
+ /* Get the MFW param */
+ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+ /* Get the union data */
+ if (p_mb_params->p_data_dst != OSAL_NULL &&
+ p_mb_params->data_dst_size) {
+ u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb,
+ union_data);
+ ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+ }
+
+ p_cmd_elem->b_is_completed = true;
+
+ return ECORE_SUCCESS;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 seq_num)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+
+ /* Set the union data */
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb, union_data);
+ OSAL_MEM_ZERO(&union_data, sizeof(union_data));
+ if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
+ OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ /* Set the drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+ /* Set the drv command along with the sequence number */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: command 0x%08x param 0x%08x\n",
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
+ bool block_cmd)
+{
+ p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+ DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+ block_cmd ? "Block" : "Unblock");
+}
+
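+/* Dump the MCP CPU state; the program counter is sampled three times, with a
+ * short delay in between, so the log shows whether the management CPU is
+ * stuck or still advancing.
+ */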
+void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+
+ cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+ DP_NOTICE(p_hwfn, false,
+ "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+ cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
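+/* Wait for the mailbox to become free, post the command with a fresh
+ * sequence number, and then poll for the MFW response. The cmd_lock is
+ * released between polling iterations so other contexts are not starved.
+ */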
+static enum _ecore_status_t
+_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u32 max_retries, u32 delay)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 cnt = 0;
+ u16 seq_num;
enum _ecore_status_t rc = ECORE_SUCCESS;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
- delay = EMUL_MCP_RESP_ITER_US;
- /* There is a built-in delay of 100usec in each MFW response read */
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- max_retries /= 10;
-#endif
+ /* Wait until the mailbox is non-occupied */
+ do {
+ /* Exit the loop if there is no pending command, or if the
+ * pending command is completed during this iteration.
+ * The spinlock stays locked until the command is sent.
+ */
- /* Get actual driver mailbox sequence */
- actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK;
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Use MCP history register to check if MCP reset occurred between
- * init time and now.
- */
- if (p_hwfn->mcp_info->mcp_hist !=
- ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
- ecore_load_mcp_offsets(p_hwfn, p_ptt);
- ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+ if (!ecore_mcp_has_pending_cmd(p_hwfn))
+ break;
+
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_UDELAY(delay);
+ OSAL_MFW_CMD_PREEMPT(p_hwfn);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_AGAIN;
}
- seq = ++p_hwfn->mcp_info->drv_mb_seq;
- /* Set drv param */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+ /* Send the mailbox command */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+ p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+ if (!p_cmd_elem) {
+ rc = ECORE_NOMEM;
+ goto err;
+ }
- /* Set drv command along with the updated sequence */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+ __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ /* Wait for the MFW response */
do {
- /* Wait for MFW response */
+ /* Exit the loop if the command is already completed, or if the
+ * command is completed during this iteration.
+ * The spinlock stays locked until the list element is removed.
+ */
+
OSAL_UDELAY(delay);
- *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Give the FW up to 5 second (500*10ms) */
- } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
- (cnt++ < max_retries));
+ if (p_cmd_elem->b_is_completed)
+ break;
- /* Is this a reply to our command? */
- if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
- *o_mcp_resp &= FW_MSG_CODE_MASK;
- /* Get the MCP param */
- *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
- } else {
- /* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
- cmd, param);
- *o_mcp_resp = 0;
- rc = ECORE_AGAIN;
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MFW_CMD_PREEMPT(p_hwfn);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
+ return ECORE_AGAIN;
}
+
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp, p_mb_params->mcp_param,
+ (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+ return ECORE_SUCCESS;
+
+err:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params)
{
- u32 union_data_addr;
- enum _ecore_status_t rc;
+ osal_size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+ /* There is a built-in delay of 100usec in each MFW response read */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ max_retries /= 10;
+#endif
/* MCP not initialized */
if (!ecore_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
return ECORE_BUSY;
}
- union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
- OFFSETOF(struct public_drv_mb, union_data);
-
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- if (p_mb_params->p_data_src != OSAL_NULL)
- ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
- p_mb_params->p_data_src,
- sizeof(*p_mb_params->p_data_src));
-
- rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
- p_mb_params->param, &p_mb_params->mcp_resp,
- &p_mb_params->mcp_param);
-
- if (p_mb_params->p_data_dst != OSAL_NULL)
- ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr,
- sizeof(*p_mb_params->p_data_dst));
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size, p_mb_params->data_dst_size,
+ union_data_size);
+ return ECORE_INVAL;
+ }
- ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_ABORTED;
+ }
- return rc;
+ return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+ delay);
}
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
u32 i_txn_size, u32 *i_buf)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = i_buf;
+ mb_params.data_src_size = (u8)i_txn_size;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
u32 *o_txn_size, u32 *o_buf)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
*o_mcp_param = mb_params.mcp_param;
*o_txn_size = *o_mcp_param;
- OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
+ /* @DPDK */
+ OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
return ECORE_SUCCESS;
}
load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
/* On CMT, always tell that it's engine */
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
*p_load_code = load_phase;
}
#endif
-static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
+static bool
+ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
+ enum ecore_override_force_load override_force_load)
{
- return (drv_role == DRV_ROLE_OS &&
- exist_drv_role == DRV_ROLE_PREBOOT) ||
- (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
+ bool can_force_load = false;
+
+ switch (override_force_load) {
+ case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
+ can_force_load = true;
+ break;
+ case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
+ can_force_load = false;
+ break;
+ default:
+ can_force_load = (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP &&
+ exist_drv_role == DRV_ROLE_OS);
+ break;
+ }
+
+ return can_force_load;
}
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_load_req_in_params *p_in_params,
struct ecore_load_req_out_params *p_out_params)
{
- union drv_union_data union_data_src, union_data_dst;
struct ecore_mcp_mb_params mb_params;
- struct load_req_stc *p_load_req;
- struct load_rsp_stc *p_load_rsp;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
u32 hsi_ver;
enum _ecore_status_t rc;
- p_load_req = &union_data_src.load_req;
- OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
- p_load_req->drv_ver_0 = p_in_params->drv_ver_0;
- p_load_req->drv_ver_1 = p_in_params->drv_ver_1;
- p_load_req->fw_ver = p_in_params->fw_ver;
- ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_ROLE,
- p_in_params->drv_role);
- ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_LOCK_TO,
- p_in_params->timeout_val);
- ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FORCE,
- p_in_params->force_cmd);
- ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FLAGS0,
- p_in_params->avoid_eng_reset);
+ OSAL_MEM_ZERO(&load_req, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
DRV_ID_MCP_HSI_VER_CURRENT :
- (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
- mb_params.p_data_src = &union_data_src;
- mb_params.p_data_dst = &union_data_dst;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
mb_params.param,
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
- p_load_req->drv_ver_0, p_load_req->drv_ver_1,
- p_load_req->fw_ver, p_load_req->misc0,
- ECORE_MFW_GET_FIELD(p_load_req->misc0,
- LOAD_REQ_ROLE),
- ECORE_MFW_GET_FIELD(p_load_req->misc0,
- LOAD_REQ_LOCK_TO),
- ECORE_MFW_GET_FIELD(p_load_req->misc0,
- LOAD_REQ_FORCE),
- ECORE_MFW_GET_FIELD(p_load_req->misc0,
- LOAD_REQ_FLAGS0));
+ load_req.drv_ver_0, load_req.drv_ver_1,
+ load_req.fw_ver, load_req.misc0,
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS) {
if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
- p_load_rsp = &union_data_dst.load_rsp;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
- p_load_rsp->drv_ver_0, p_load_rsp->drv_ver_1,
- p_load_rsp->fw_ver, p_load_rsp->misc0,
- ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
- LOAD_RSP_ROLE),
- ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
- LOAD_RSP_HSI),
- ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
- LOAD_RSP_FLAGS0));
-
- p_out_params->exist_drv_ver_0 = p_load_rsp->drv_ver_0;
- p_out_params->exist_drv_ver_1 = p_load_rsp->drv_ver_1;
- p_out_params->exist_fw_ver = p_load_rsp->fw_ver;
+ load_rsp.drv_ver_0, load_rsp.drv_ver_1,
+ load_rsp.fw_ver, load_rsp.misc0,
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
p_out_params->exist_drv_role =
- ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_ROLE);
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
p_out_params->mfw_hsi_ver =
- ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_HSI);
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
p_out_params->drv_exists =
- ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
- LOAD_RSP_FLAGS0) &
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
LOAD_RSP_FLAGS0_DRV_EXISTS;
}
return ECORE_SUCCESS;
}
-static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
- enum ecore_drv_role drv_role,
- u8 *p_mfw_drv_role)
+static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
+ u8 *p_mfw_drv_role)
{
switch (drv_role) {
case ECORE_DRV_ROLE_OS:
case ECORE_DRV_ROLE_KDUMP:
*p_mfw_drv_role = DRV_ROLE_KDUMP;
break;
- default:
- DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
- return ECORE_INVAL;
}
-
- return ECORE_SUCCESS;
}
enum ecore_load_req_force {
ECORE_LOAD_REQ_FORCE_ALL,
};
-static enum _ecore_status_t
-ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
- enum ecore_load_req_force force_cmd,
- u8 *p_mfw_force_cmd)
+static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
{
switch (force_cmd) {
case ECORE_LOAD_REQ_FORCE_NONE:
case ECORE_LOAD_REQ_FORCE_ALL:
*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
break;
- default:
- DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
- return ECORE_INVAL;
}
-
- return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
{
struct ecore_load_req_out_params out_params;
struct ecore_load_req_in_params in_params;
- u8 mfw_drv_role, mfw_force_cmd;
+ u8 mfw_drv_role = 0, mfw_force_cmd;
enum _ecore_status_t rc;
#ifndef ASIC_ONLY
in_params.drv_ver_0 = ECORE_VERSION;
in_params.drv_ver_1 = ecore_get_config_bitmap();
in_params.fw_ver = STORM_FW_VERSION;
- rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
- if (rc != ECORE_SUCCESS)
- return rc;
-
+ ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
in_params.drv_role = mfw_drv_role;
in_params.timeout_val = p_params->timeout_val;
- rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
- &mfw_force_cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
-
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
in_params.avoid_eng_reset = p_params->avoid_eng_reset;
DP_INFO(p_hwfn,
"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
- /* The previous load request set the mailbox blocking */
- p_hwfn->mcp_info->block_mb_sending = false;
-
in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
return rc;
} else if (out_params.load_code ==
FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
- /* The previous load request set the mailbox blocking */
- p_hwfn->mcp_info->block_mb_sending = false;
-
if (ecore_mcp_can_force_load(in_params.drv_role,
- out_params.exist_drv_role)) {
+ out_params.exist_drv_role,
+ p_params->override_force_load)) {
DP_INFO(p_hwfn,
- "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
- rc = ecore_get_mfw_force_cmd(p_hwfn,
- ECORE_LOAD_REQ_FORCE_ALL,
- &mfw_force_cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
return rc;
} else {
DP_NOTICE(p_hwfn, false,
- "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
+			  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
return ECORE_INVAL;
}
break;
- case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
- case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
- case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
- case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
- DP_NOTICE(p_hwfn, false,
- "MFW refused a load request [resp 0x%08x]. Aborting.\n",
- out_params.load_code);
- return ECORE_BUSY;
default:
DP_NOTICE(p_hwfn, false,
- "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
+ "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
out_params.load_code);
- break;
+ return ECORE_BUSY;
}
p_params->load_code = out_params.load_code;
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+ ¶m);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+ return rc;
+ }
+
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn, false,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 wol_param, mcp_resp, mcp_param;
+
+ /* @DPDK */
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+ &mcp_resp, &mcp_param);
+}
+
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct mcp_mac wol_mac;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
enum _ecore_status_t rc;
int i;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
- OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
&mb_params);
if (rc != ECORE_SUCCESS) {
OFFSETOF(struct public_port,
transceiver_data)));
- transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
+ transceiver_state = GET_MFW_FIELD(transceiver_state,
+ ETH_TRANSCEIVER_STATE);
if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
else
DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+
+ OSAL_TRANSCEIVER_UPDATE(p_hwfn);
+}
+
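+/* Parse the port's eee_status shmem word into the link state: whether EEE is
+ * active and which speeds the local and remote ends advertise for it.
+ */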
+static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link)
+{
+ u32 eee_status, val;
+
+ p_link->eee_adv_caps = 0;
+ p_link->eee_lp_adv_caps = 0;
+ eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, eee_status));
+ p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
+ val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
+ val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
+}
+
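+/* Copy the public_func shmem section of the given PF into p_data, one dword
+ * at a time, and return the number of bytes read.
+ */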
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+ size = OSAL_MIN_T(u32, sizeof(*p_data),
+ SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
}
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
u8 max_bw, min_bw;
u32 status = 0;
+ /* Prevent SW/attentions from doing this at the same time */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
+
p_link = &p_hwfn->mcp_info->link_output;
OSAL_MEMSET(p_link, 0, sizeof(*p_link));
if (!b_reset) {
} else {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Resetting link indications\n");
- return;
+ goto out;
}
- if (p_hwfn->b_drv_link_init)
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
- else
+ if (p_hwfn->b_drv_link_init) {
+ /* Link indication with modern MFW arrives as per-PF
+ * indication.
+ */
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+ struct public_func shmem_info;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ p_link->link_up = !!(shmem_info.status &
+ FUNC_STATUS_VIRTUAL_LINK_UP);
+ } else {
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ }
+ } else {
p_link->link_up = false;
+ }
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
p_link, max_bw);
- /* Mintz bandwidth configuration */
+ /* Min bandwidth configuration */
__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
p_link, min_bw);
- ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
+ ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
+ ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
+
OSAL_LINK_UPDATE(p_hwfn);
+out:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
{
struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
- struct eth_phy_cfg *p_phy_cfg;
+ struct eth_phy_cfg phy_cfg;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cmd;
#endif
/* Set the shmem configuration according to params */
- p_phy_cfg = &union_data.drv_phy_cfg;
- OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
+ OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- p_phy_cfg->speed = params->speed.forced_speed;
- p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
- p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
- p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
- p_phy_cfg->adv_speed = params->speed.advertised_speeds;
- p_phy_cfg->loopback_mode = params->loopback_mode;
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
+
+	/* Some MFWs indicate this capability regardless of whether EEE is
+	 * actually feasible. Since adv_caps would in any case be set
+	 * internally by ecore, make sure LFA would still work.
+ */
+ if ((p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
+ params->eee.enable) {
+ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+ if (params->eee.tx_lpi_enable)
+ phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
+ if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
+ if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
+ phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
+ EEE_TX_TIMER_USEC_OFFSET) &
+ EEE_TX_TIMER_USEC_MASK;
+ }
+
p_hwfn->b_drv_link_init = b_up;
if (b_up)
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
- " adv_speed 0x%08x, loopback 0x%08x\n",
- p_phy_cfg->speed, p_phy_cfg->pause,
- p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode);
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
+ phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+ phy_cfg.loopback_mode);
else
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
return rc;
}
- /* Reset the link status if needed */
- if (!b_up)
- ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
+ /* Mimic link-change attention, done for several reasons:
+ * - On reset, there's no guarantee MFW would trigger
+ * an attention.
+ * - On initialization, older MFWs might not indicate link change
+ * during LFA, so we'll never get an UP indication.
+ */
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
- return rc;
+ return ECORE_SUCCESS;
}
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
enum ecore_mcp_protocol_type stats_type;
union ecore_mcp_protocol_stats stats;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 hsi_param;
enum _ecore_status_t rc;
hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
break;
default:
- DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Invalid protocol type %d\n", type);
return;
}
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_STATS;
mb_params.param = hsi_param;
- OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
*/
p_info->bandwidth_min = (p_shmem_info->config &
FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
+ FUNC_MF_CFG_MIN_BW_OFFSET;
if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
DP_INFO(p_hwfn,
"bandwidth minimum out of bounds [%02x]. Set to 1\n",
p_info->bandwidth_max = (p_shmem_info->config &
FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
+ FUNC_MF_CFG_MAX_BW_OFFSET;
if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
DP_INFO(p_hwfn,
"bandwidth maximum out of bounds [%02x]. Set to 100\n",
}
}
-static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- OSAL_MEM_ZERO(p_data, sizeof(*p_data));
-
- size = OSAL_MIN_T(u32, sizeof(*p_data),
- SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
-
- return size;
-}
-
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
¶m);
}
-static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
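+/* Handle an S-tag (outer VLAN) update from the MFW: cache the new value,
+ * reprogram the NIG/DORQ registers accordingly, update the storm FW, and
+ * acknowledge the notification.
+ */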
+static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+
+ p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+ FUNC_MF_CFG_OV_STAG_MASK;
+ p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
+ if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+ p_hwfn->hw_info.ovlan);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+ p_hwfn->hw_info.ovlan);
+ } else {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+ }
+
+ ecore_sp_pf_update_stag(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+ OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
+
+ /* Acknowledge the MFW */
+ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, ¶m);
+}
+
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
/* A single notification should be sent to upper driver in CMT mode */
if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
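+/* Parameters of a single mdump sub-command, sent to the MFW via
+ * DRV_MSG_CODE_MDUMP_CMD with 'cmd' as the mailbox param.
+ */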
+struct ecore_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u32 mdump_cmd, union drv_union_data *p_data_src,
- union drv_union_data *p_data_dst, u32 *p_mcp_resp)
+ struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
- mb_params.param = mdump_cmd;
- mb_params.p_data_src = p_data_src;
- mb_params.p_data_dst = p_data_dst;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- *p_mcp_resp = mb_params.mcp_resp;
- if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
- DP_NOTICE(p_hwfn, false,
- "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
- mdump_cmd);
- rc = ECORE_INVAL;
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_INFO(p_hwfn,
+ "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = ECORE_NOTIMPL;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = ECORE_NOTIMPL;
}
return rc;
static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 epoch)
{
- union drv_union_data union_data;
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
+ mdump_cmd_params.p_data_src = &epoch;
+ mdump_cmd_params.data_src_size = sizeof(epoch);
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
- &union_data, OSAL_NULL, &mcp_resp);
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- p_hwfn->p_dev->mdump_en = true;
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct mdump_config_stc *p_mdump_config)
{
- union drv_union_data union_data;
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
enum _ecore_status_t rc;
- rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
- OSAL_NULL, &union_data, &mcp_resp);
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
+ mdump_cmd_params.p_data_dst = p_mdump_config;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
if (rc != ECORE_SUCCESS)
return rc;
- if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
- return ECORE_NOTIMPL;
-
- if (mcp_resp != FW_MSG_CODE_OK) {
- DP_NOTICE(p_hwfn, false,
- "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
- mcp_resp);
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
rc = ECORE_UNKNOWN_ERROR;
}
- OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
- sizeof(*p_mdump_config));
-
return rc;
}
enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+enum _ecore_status_t
+ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_retain_data *p_mdump_retain)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+ struct mdump_retain_data_stc mfw_mdump_retain;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return ECORE_UNKNOWN_ERROR;
+ }
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
+ p_mdump_retain->valid = mfw_mdump_retain.valid;
+ p_mdump_retain->epoch = mfw_mdump_retain.epoch;
+ p_mdump_retain->pf = mfw_mdump_retain.pf;
+ p_mdump_retain->status = mfw_mdump_retain.status;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_mdump_retain_data mdump_retain;
+ enum _ecore_status_t rc;
+
/* In CMT mode - no need for more than a single acknowledgment to the
* MFW, and no more than a single notification to the upper driver.
*/
if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
return;
- DP_NOTICE(p_hwfn, false,
- "Received a critical error notification from the MFW!\n");
+ rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == ECORE_SUCCESS && mdump_retain.valid) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch, mdump_retain.pf,
+ mdump_retain.status);
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW notified that a critical error occurred in the device\n");
+ }
- if (p_hwfn->p_dev->mdump_en) {
+ if (p_hwfn->p_dev->allow_mdump) {
DP_NOTICE(p_hwfn, false,
"Not acknowledging the notification to allow the MFW crash dump\n");
- p_hwfn->p_dev->mdump_en = false;
return;
}
+ DP_NOTICE(p_hwfn, false,
+ "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
ecore_mcp_mdump_ack(p_hwfn, p_ptt);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
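+/* Read the Unified Fabric Port (UFP) configuration that the MFW exposes in
+ * shmem: channel type, scheduling mode, default TC and host priority control.
+ */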
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 port_cfg, val;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ return;
+
+ OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
+ port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, oem_cfg_port));
+ val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
+ if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
+ DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
+ val);
+
+ val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
+ if (val == OEM_CFG_SCHED_TYPE_ETS)
+ p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
+ else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
+ p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
+ else
+ DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
+ val);
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
+ p_hwfn->ufp_info.tc = (u8)val;
+ val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
+ OEM_CFG_FUNC_HOST_PRI_CTRL);
+ if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
+ p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
+ else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
+ p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
+ else
+ DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
+ val);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
+ p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+ p_hwfn->ufp_info.pri_type);
+}
+
+static enum _ecore_status_t
+ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
+
+ if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
+ p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
+ p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+
+ ecore_qm_reconf(p_hwfn, p_ptt);
+ } else {
+ /* Merge UFP TC with the dcbx TC data */
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ }
+
+ /* update storm FW with negotiation results */
+ ecore_sp_pf_update_ufp(p_hwfn);
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
ECORE_DCBX_OPERATIONAL_MIB);
+ /* clear the user-config cache */
+ OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
+ sizeof(struct ecore_dcbx_set));
+ break;
+ case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
+ ecore_lldp_mib_update_event(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_OEM_CFG_UPDATE:
+ ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
case MFW_DRV_MSG_BW_UPDATE:
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_S_TAG_UPDATE:
+ ecore_mcp_update_stag(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_FAILURE_DETECTED:
- ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ ecore_mcp_handle_fan_failure(p_hwfn);
break;
case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
/* Copy the new mfw messages into the shadow */
OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
- return rc;
-}
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_mfw_ver,
+ u32 *p_running_bundle_id)
+{
+ u32 global_offsize;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ if (p_hwfn->vf_iov_info) {
+ struct pfvf_acquire_resp_tlv *p_resp;
+
+ p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+ *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+ return ECORE_SUCCESS;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF requested MFW version prior to ACQUIRE\n");
+ return ECORE_INVAL;
+ }
+ }
+
+	global_offsize = ecore_rd(p_hwfn, p_ptt,
+				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+						       PUBLIC_GLOBAL));
+	*p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
+			      SECTION_ADDR(global_offsize, 0) +
+			      OFFSETOF(struct public_global, mfw_ver));
+
+ if (p_running_bundle_id != OSAL_NULL) {
+ *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) +
+ OFFSETOF(struct public_global,
+ running_bundle_id));
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_media_type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+
+ if (!p_ptt) {
+ *p_media_type = MEDIA_UNSPECIFIED;
+ rc = ECORE_INVAL;
+ } else {
+ *p_media_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ media_type));
+ }
+
+ return rc;
+}
+
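+/* Read the raw transceiver_data word (state and type) from the port's
+ * shmem section.
+ */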
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_transceiver_type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+ if (!p_ptt) {
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ rc = ECORE_INVAL;
+ } else {
+ *p_transceiver_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ transceiver_data));
+ }
+
+ return rc;
+}
+
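+/* A transceiver is ready when it is present, not in the middle of an
+ * update, and reports a known type.
+ */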
+static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
+{
+ if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+ ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+ (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+ return 1;
+
+ return 0;
+}
+
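+/* Derive the driver speed capability mask from the type of the
+ * transceiver currently plugged into the port.
+ */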
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_speed_mask)
+{
+ u32 transceiver_data, transceiver_type, transceiver_state;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ transceiver_state = GET_MFW_FIELD(transceiver_data,
+ ETH_TRANSCEIVER_STATE);
+
+ transceiver_type = GET_MFW_FIELD(transceiver_data,
+ ETH_TRANSCEIVER_TYPE);
+
+ if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
+ return ECORE_INVAL;
+
+ switch (transceiver_type) {
+ case ETH_TRANSCEIVER_TYPE_1G_LX:
+ case ETH_TRANSCEIVER_TYPE_1G_SX:
+ case ETH_TRANSCEIVER_TYPE_1G_PCC:
+ case ETH_TRANSCEIVER_TYPE_1G_ACC:
+ case ETH_TRANSCEIVER_TYPE_1000BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_10G_LR:
+ case ETH_TRANSCEIVER_TYPE_10G_LRM:
+ case ETH_TRANSCEIVER_TYPE_10G_ER:
+ case ETH_TRANSCEIVER_TYPE_10G_PCC:
+ case ETH_TRANSCEIVER_TYPE_10G_ACC:
+ case ETH_TRANSCEIVER_TYPE_4x10G:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_40G_LR4:
+ case ETH_TRANSCEIVER_TYPE_40G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_100G_AOC:
+ case ETH_TRANSCEIVER_TYPE_100G_SR4:
+ case ETH_TRANSCEIVER_TYPE_100G_LR4:
+ case ETH_TRANSCEIVER_TYPE_100G_ER4:
+ case ETH_TRANSCEIVER_TYPE_100G_ACC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_25G_LR:
+ case ETH_TRANSCEIVER_TYPE_25G_AOC:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
-enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 *p_mfw_ver,
- u32 *p_running_bundle_id)
-{
- u32 global_offsize;
+ case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+ case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
- return ECORE_SUCCESS;
- }
-#endif
+ case ETH_TRANSCEIVER_TYPE_40G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
- if (IS_VF(p_hwfn->p_dev)) {
- if (p_hwfn->vf_iov_info) {
- struct pfvf_acquire_resp_tlv *p_resp;
+ case ETH_TRANSCEIVER_TYPE_100G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
- p_resp = &p_hwfn->vf_iov_info->acquire_resp;
- *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
- return ECORE_SUCCESS;
- } else {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF requested MFW version prior to ACQUIRE\n");
- return ECORE_INVAL;
- }
- }
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
- global_offsize = ecore_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
- public_base,
- PUBLIC_GLOBAL));
- *p_mfw_ver =
- ecore_rd(p_hwfn, p_ptt,
- SECTION_ADDR(global_offsize,
- 0) + OFFSETOF(struct public_global, mfw_ver));
+ case ETH_TRANSCEIVER_TYPE_XLPPI:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ break;
- if (p_running_bundle_id != OSAL_NULL) {
- *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
- SECTION_ADDR(global_offsize,
- 0) +
- OFFSETOF(struct public_global,
- running_bundle_id));
+ case ETH_TRANSCEIVER_TYPE_10G_BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ default:
+ DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+ transceiver_type);
+ *p_speed_mask = 0xff;
+ break;
}
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
- u32 *p_media_type)
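+/* Read the port's board configuration word from the nvm_cfg1 section
+ * in the MCP scratchpad.
+ */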
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_board_config)
{
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
- struct ecore_ptt *p_ptt;
+ u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* TODO - Add support for VFs */
- if (IS_VF(p_dev))
+ if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
return ECORE_BUSY;
}
+ if (!p_ptt) {
+ *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+ rc = ECORE_INVAL;
+ } else {
+ nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
+ MISC_REG_GEN_PURP_CR0);
+ nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
+ nvm_cfg_addr + 4);
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ *p_board_config = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port,
+ board_cfg));
+ }
- *p_media_type = MEDIA_UNSPECIFIED;
-
- p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return ECORE_BUSY;
-
- *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
- OFFSETOF(struct public_port, media_type));
-
- ecore_ptt_release(p_hwfn, p_ptt);
-
- return ECORE_SUCCESS;
+ return rc;
}
/* @DPDK */
return &p_hwfn->mcp_info->func_info;
}
-enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_mcp_nvm_params *params)
-{
- enum _ecore_status_t rc;
-
- switch (params->type) {
- case ECORE_MCP_NVM_RD:
- rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- &params->nvm_common.resp,
- &params->nvm_common.param,
- params->nvm_rd.buf_size,
- params->nvm_rd.buf);
- break;
- case ECORE_MCP_CMD:
- rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- &params->nvm_common.resp,
- &params->nvm_common.param);
- break;
- case ECORE_MCP_NVM_WR:
- rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- &params->nvm_common.resp,
- &params->nvm_common.param,
- params->nvm_wr.buf_size,
- params->nvm_wr.buf);
- break;
- default:
- rc = ECORE_NOTIMPL;
- break;
- }
- return rc;
-}
-
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 personalities)
{
flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
- MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
- flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
*p_flash_size = flash_size;
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 vf_id, u8 num)
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
{
u32 resp = 0, param = 0, rc_param = 0;
enum _ecore_status_t rc;
return ECORE_SUCCESS;
num *= p_hwfn->p_dev->num_hwfns;
- param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
- param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
return rc;
}
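+/* AH flow: the request carries only the number of vectors, with no
+ * vf_id, and covers the PF's VFs as a whole.
+ */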
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 num)
+{
+ u32 resp = 0, param = num, rc_param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
+ param, &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
+ rc = ECORE_INVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts for VFs\n",
+ num);
+ }
+
+ return rc;
+}
+
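+/* Configure the number of MSI-X vectors for a VF using the
+ * chip-specific mailbox command.
+ */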
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
+{
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
+ else
+ return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
+}
+
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_version *p_ver)
{
- struct drv_version_stc *p_drv_version;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct drv_version_stc drv_version;
u32 num_words, i;
void *p_name;
OSAL_BE32 val;
return ECORE_SUCCESS;
#endif
- p_drv_version = &union_data.drv_version;
- p_drv_version->version = p_ver->version;
+ OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
+ drv_version.version = p_ver->version;
num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
for (i = 0; i < num_words; i++) {
/* The driver name is expected to be in a big-endian format */
p_name = &p_ver->name[i * sizeof(u32)];
val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
- *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
}
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
return rc;
}
+/* Wait up to 100 msec (10 retries x 10 msec) for the MCP to halt */
+#define ECORE_MCP_HALT_SLEEP_MS 10
+#define ECORE_MCP_HALT_MAX_RETRIES 10
+
enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
enum _ecore_status_t rc;
- u32 resp = 0, param = 0;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
&param);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
- return rc;
+ do {
+ OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return ECORE_BUSY;
+ }
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
+
+ return ECORE_SUCCESS;
}
+#define ECORE_MCP_RESUME_SLEEP_MS 10
+
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 value, cpu_mode;
+ u32 cpu_mode, cpu_state;
ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
- value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
- value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
- ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+
+ OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return ECORE_BUSY;
+ }
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, false);
- return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
+ return ECORE_SUCCESS;
}
enum _ecore_status_t
struct ecore_ptt *p_ptt,
enum ecore_ov_client client)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
u32 drv_mb_param;
+ enum _ecore_status_t rc;
switch (client) {
case ECORE_OV_CLIENT_DRV:
struct ecore_ptt *p_ptt,
enum ecore_ov_driver_state drv_state)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
u32 drv_mb_param;
+ enum _ecore_status_t rc;
switch (drv_state) {
case ECORE_OV_DRIVER_STATE_NOT_LOADED:
struct ecore_ptt *p_ptt,
u32 mask_parities)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
mask_parities, &resp, &param);
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
u32 bytes_left, offset, bytes_to_copy, buf_size;
- struct ecore_mcp_nvm_params params;
+ u32 nvm_offset, resp, param;
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc = ECORE_SUCCESS;
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
bytes_left = len;
offset = 0;
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
while (bytes_left > 0) {
bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
MCP_DRV_NVM_BUF_LEN);
- params.nvm_common.offset = (addr + offset) |
- (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
- params.nvm_rd.buf = (u32 *)(p_buf + offset);
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
- FW_MSG_CODE_NVM_OK)) {
- DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ nvm_offset = (addr + offset) | (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_NVM_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm read failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
break;
}
* isn't preemptible. Sleep a bit to prevent CPU hogging.
*/
if (bytes_left % 0x1000 <
- (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
+ (bytes_left - buf_size) % 0x1000)
OSAL_MSLEEP(1);
- offset += *params.nvm_rd.buf_size;
- bytes_left -= *params.nvm_rd.buf_size;
+ offset += buf_size;
+ bytes_left -= buf_size;
}
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &len;
- params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
- DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
- params.nvm_common.offset = addr;
- params.nvm_rd.buf = (u32 *)p_buf;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ (cmd == ECORE_PHY_CORE_READ) ?
+ DRV_MSG_CODE_PHY_CORE_READ :
+ DRV_MSG_CODE_PHY_RAW_READ,
+ addr, &resp, &param, &len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
ecore_ptt_release(p_hwfn, p_ptt);
enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
+ u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
- u32 buf_idx, buf_size;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_WR;
- if (cmd == ECORE_PUT_FILE_DATA)
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
- else
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ switch (cmd) {
+ case ECORE_PUT_FILE_DATA:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+ break;
+ case ECORE_NVM_WRITE_NVRAM:
+ nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ break;
+ case ECORE_EXT_PHY_FW_UPGRADE:
+ nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
+ cmd);
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
buf_idx = 0;
while (buf_idx < len) {
buf_size = OSAL_MIN_T(u32, (len - buf_idx),
MCP_DRV_NVM_BUF_LEN);
- params.nvm_common.offset = ((buf_size <<
- DRV_MB_PARAM_NVM_LEN_SHIFT)
- | addr) + buf_idx;
- params.nvm_wr.buf_size = buf_size;
- params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if (rc != ECORE_SUCCESS ||
- ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
- (params.nvm_common.resp !=
- FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
- DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
+ addr) +
+ buf_idx;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
+ &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "ecore_mcp_nvm_wr_cmd() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_OK &&
+ resp != FW_MSG_CODE_NVM_OK &&
+ resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm write failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ break;
+ }
/* This can be a lengthy process, and it's possible scheduler
* isn't preemptible. Sleep a bit to prevent CPU hogging.
buf_idx += buf_size;
}
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
+out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param, nvm_cmd;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_WR;
- params.nvm_wr.buf_size = len;
- params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
- DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
- params.nvm_common.offset = addr;
- params.nvm_wr.buf = (u32 *)p_buf;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
+ DRV_MSG_CODE_PHY_RAW_WRITE;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
+ &resp, &param, len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf)
{
- struct ecore_mcp_nvm_params params;
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
+ u32 resp, param;
enum _ecore_status_t rc;
- u32 bytes_left, bytes_to_copy, buf_size;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset =
- (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
- (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
addr = offset;
offset = 0;
bytes_left = len;
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
while (bytes_left > 0) {
bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
MAX_I2C_TRANSACTION_SIZE);
- params.nvm_rd.buf = (u32 *)(p_buf + offset);
- params.nvm_common.offset &=
- (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
- DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
- params.nvm_common.offset |=
- ((addr + offset) <<
- DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
- params.nvm_common.offset |=
- (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
- FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
return ECORE_NODEV;
- } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
return ECORE_UNKNOWN_ERROR;
- offset += *params.nvm_rd.buf_size;
- bytes_left -= *params.nvm_rd.buf_size;
+ offset += buf_size;
+ bytes_left -= buf_size;
}
return ECORE_SUCCESS;
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_idx, buf_size, nvm_offset, resp, param;
enum _ecore_status_t rc;
- u32 buf_idx, buf_size;
-
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset =
- (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
- (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
- params.type = ECORE_MCP_NVM_WR;
- params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
+
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
buf_idx = 0;
while (buf_idx < len) {
buf_size = OSAL_MIN_T(u32, (len - buf_idx),
MAX_I2C_TRANSACTION_SIZE);
- params.nvm_common.offset &=
- (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
- DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
- params.nvm_common.offset |=
- ((offset + buf_idx) <<
- DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
- params.nvm_common.offset |=
- (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
- params.nvm_wr.buf_size = buf_size;
- params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
- FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((offset + buf_idx) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (buf_size <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_WRITE,
+ nvm_offset, &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a transceiver write command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
return ECORE_NODEV;
- } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
return ECORE_UNKNOWN_ERROR;
buf_idx += buf_size;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, rsp;
- drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
drv_mb_param, &rsp, gpio_val);
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, param, rsp;
- drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
- (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
+ (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
drv_mb_param, &rsp, &param);
u32 drv_mb_param = 0, rsp, val = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
- drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
+ drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
drv_mb_param, &rsp, &val);
return rc;
*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
- DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
+ DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
- DRV_MB_PARAM_GPIO_CTRL_SHIFT;
+ DRV_MB_PARAM_GPIO_CTRL_OFFSET;
if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
return ECORE_UNKNOWN_ERROR;
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, &param);
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, &param);
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, num_images);
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct bist_nvm_image_att *p_image_att, u32 image_index)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_size, nvm_offset, resp, param;
enum _ecore_status_t rc;
- u32 buf_size;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
- params.nvm_common.offset |= (image_index <<
- DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
-
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
- params.nvm_rd.buf = (u32 *)p_image_att;
-
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
+ nvm_offset |= (image_index <<
+ DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)p_image_att);
if (rc != ECORE_SUCCESS)
return rc;
- if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
(p_image_att->return_code != 1))
rc = ECORE_UNKNOWN_ERROR;
struct ecore_temperature_info *p_temp_info)
{
struct ecore_temperature_sensor *p_temp_sensor;
- struct temperature_status_stc *p_mfw_temp_info;
+ struct temperature_status_stc mfw_temp_info;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 val;
enum _ecore_status_t rc;
u8 i;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = &mfw_temp_info;
+ mb_params.data_dst_size = sizeof(mfw_temp_info);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- p_mfw_temp_info = &union_data.temp_info;
-
OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
- p_temp_info->num_sensors = OSAL_MIN_T(u32,
- p_mfw_temp_info->num_of_sensors,
+ p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
ECORE_MAX_NUM_OF_SENSORS);
for (i = 0; i < p_temp_info->num_sensors; i++) {
- val = p_mfw_temp_info->sensor[i];
+ val = mfw_temp_info.sensor[i];
p_temp_sensor = &p_temp_info->sensors[i];
p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
- SENSOR_LOCATION_SHIFT;
+ SENSOR_LOCATION_OFFSET;
p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
- THRESHOLD_HIGH_SHIFT;
+ THRESHOLD_HIGH_OFFSET;
p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
- CRITICAL_TEMPERATURE_SHIFT;
+ CRITICAL_TEMPERATURE_OFFSET;
p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
- CURRENT_TEMP_SHIFT;
+ CURRENT_TEMP_OFFSET;
}
return ECORE_SUCCESS;
struct ecore_ptt *p_ptt,
struct ecore_mba_vers *p_mba_vers)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_size, resp, param;
enum _ecore_status_t rc;
- u32 buf_size;
- OSAL_MEM_ZERO(&params, sizeof(params));
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
- params.nvm_common.offset = 0;
- params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
- params.nvm_rd.buf_size = &buf_size;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
+ 0, &resp, &param, &buf_size,
+ &p_mba_vers->mba_vers[0]);
if (rc != ECORE_SUCCESS)
return rc;
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_NVM_OK)
+ if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
rc = ECORE_UNKNOWN_ERROR;
if (buf_size != MCP_DRV_NVM_BUF_LEN)
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION \
((ECORE_RESC_ALLOC_VERSION_MAJOR << \
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
(ECORE_RESC_ALLOC_VERSION_MINOR << \
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
struct ecore_resc_alloc_in_params {
u32 cmd;
u32 flags;
};
+#define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
+
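+/* Prepare the PF for the recovery flow: let in-flight PCIe transactions
+ * drain and then clear the PF's internal FID_enable in the PXP.
+ */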
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ enum _ecore_status_t rc;
+
+ /* Allow ongoing PCIe transactions to complete */
+ OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
+
+ /* Clear the PF's internal FID_enable in the PXP */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+
+ return rc;
+}
+
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_resc_alloc_in_params *p_in_params,
struct ecore_resc_alloc_out_params *p_out_params)
{
- struct resource_info *p_mfw_resc_info;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct resource_info mfw_resc_info;
enum _ecore_status_t rc;
- p_mfw_resc_info = &union_data.resource;
- OSAL_MEM_ZERO(p_mfw_resc_info, sizeof(*p_mfw_resc_info));
+ OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
- p_mfw_resc_info->res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
- if (p_mfw_resc_info->res_id == RESOURCE_NUM_INVALID) {
+ mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
DP_ERR(p_hwfn,
"Failed to match resource %d [%s] with the MFW resources\n",
p_in_params->res_id,
switch (p_in_params->cmd) {
case DRV_MSG_SET_RESOURCE_VALUE_MSG:
- p_mfw_resc_info->size = p_in_params->resc_max_val;
+ mfw_resc_info.size = p_in_params->resc_max_val;
/* Fallthrough */
case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
break;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = p_in_params->cmd;
mb_params.param = ECORE_RESC_ALLOC_VERSION;
- mb_params.p_data_src = &union_data;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
p_in_params->cmd, p_in_params->res_id,
ecore_hw_get_resc_name(p_in_params->res_id),
- ECORE_MFW_GET_FIELD(mb_params.param,
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
- ECORE_MFW_GET_FIELD(mb_params.param,
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_in_params->resc_max_val);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
p_out_params->mcp_resp = mb_params.mcp_resp;
p_out_params->mcp_param = mb_params.mcp_param;
- p_out_params->resc_num = p_mfw_resc_info->size;
- p_out_params->resc_start = p_mfw_resc_info->offset;
- p_out_params->vf_resc_num = p_mfw_resc_info->vf_size;
- p_out_params->vf_resc_start = p_mfw_resc_info->vf_offset;
- p_out_params->flags = p_mfw_resc_info->flags;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
- ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
- FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
- ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
- FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_out_params->resc_num, p_out_params->resc_start,
p_out_params->vf_resc_num, p_out_params->vf_resc_start,
p_out_params->flags);
}
if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
- u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+ u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
DP_NOTICE(p_hwfn, false,
"The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
break;
}
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
return rc;
/* Analyze the response */
- p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
- RESOURCE_CMD_RSP_OWNER);
- opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+ p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
return ECORE_SUCCESS;
}
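+/* Fill resource lock/unlock parameters with default values; permanent
+ * resources are requested without an aging timeout and without retries.
+ */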
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent)
+{
+ if (p_lock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
+
+ /* Permanent resources don't require aging, and there's no
+ * point in retrying the acquisition since no other entity is
+ * expected to release them.
+ */
+ if (b_is_permanent) {
+ p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
+ } else {
+ p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+ p_lock->retry_interval =
+ ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+ p_lock->sleep_b4_retry = true;
+ }
+
+ p_lock->resource = resource;
+ }
+
+ if (p_unlock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
+ p_unlock->resource = resource;
+ }
+}
+
enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_unlock_params *p_params)
opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
: RESOURCE_OPCODE_RELEASE;
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
return rc;
/* Analyze the response */
- opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
return ECORE_SUCCESS;
}
+
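+/* SmartLinQ support is advertised by the MFW via the capabilities
+ * bitmap read by ecore_mcp_get_capabilities().
+ */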
+bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
+}
+
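+/* Query the MFW feature-support bitmap and cache it in
+ * mcp_info->capabilities.
+ */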
+enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
+ 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
+ if (rc == ECORE_SUCCESS)
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
+ "MFW supported features: %08x\n",
+ p_hwfn->mcp_info->capabilities);
+
+ return rc;
+}
+
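+/* Advertise the features supported by the driver (SmartLinQ, EEE and
+ * virtual link) to the MFW.
+ */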
+enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param, features;
+
+ features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
+ DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
+ features, &mcp_resp, &mcp_param);
+}
+
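+/* Issue an attribute read/write/clear command to the MFW. Write requests
+ * pass the value, mask and offset via the mailbox data; read results are
+ * returned in mcp_param.
+ */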
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_attr *p_drv_attr)
+{
+ struct attribute_cmd_write_stc attr_cmd_write;
+ enum _attribute_commands_e mfw_attr_cmd;
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ switch (p_drv_attr->attr_cmd) {
+ case ECORE_MCP_DRV_ATTR_CMD_READ:
+ mfw_attr_cmd = ATTRIBUTE_CMD_READ;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_WRITE:
+ mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
+ mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
+ mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
+ p_drv_attr->attr_cmd);
+ return ECORE_INVAL;
+ }
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
+ SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
+ p_drv_attr->attr_num);
+ SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
+ mfw_attr_cmd);
+ if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
+ OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
+ attr_cmd_write.val = p_drv_attr->val;
+ attr_cmd_write.mask = p_drv_attr->mask;
+ attr_cmd_write.offset = p_drv_attr->offset;
+
+ mb_params.p_data_src = &attr_cmd_write;
+ mb_params.data_src_size = sizeof(attr_cmd_write);
+ }
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The attribute command is not supported by the MFW\n");
+ return ECORE_NOTIMPL;
+ } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
+ mb_params.mcp_resp, p_drv_attr->attr_cmd,
+ p_drv_attr->attr_num);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
+ p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
+ p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
+ mb_params.mcp_param);
+
+ if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
+ p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
+ p_drv_attr->val = mb_params.mcp_param;
+
+ return ECORE_SUCCESS;
+}
+
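+/* Write a single dword to a WoL register through the MFW mailbox. */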
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 offset, u32 val)
+{
+ struct ecore_mcp_mb_params mb_params = {0};
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 dword = val;
+
+ mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
+ mb_params.param = offset;
+ mb_params.p_data_src = &dword;
+ mb_params.data_src_size = sizeof(dword);
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a WoL write request, rc = %d\n", rc);
+ return;
+ }
+
+ if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
+ val, offset, mb_params.mcp_resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+}