/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
24 #define CHIP_MCP_RESP_ITER_US 10
25 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
27 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
28 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
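/* Each wait iteration below sleeps CHIP_MCP_RESP_ITER_US (10 usec), so
 * ECORE_DRV_MB_MAX_RETRIES (500,000) bounds a mailbox command at roughly 5 sec
 * and ECORE_MCP_RESET_RETRIES (50,000) bounds an MCP reset at roughly 500 msec.
 */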
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))
45 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
46 DRV_ID_PDA_COMP_VER_SHIFT)
48 #define MCP_BYTES_PER_MBIT_SHIFT 17
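/* Shifting a size expressed in Mbit left by 17 converts it to bytes:
 * 1 Mbit = 2^20 bits = 2^17 bytes. Used when decoding MCP_REG_NVM_CFG4.
 */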
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
	}
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					 MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlock */
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
	OSAL_SPIN_LOCK_INIT(&p_info->lock);

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}
/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when [UN]LOAD_REQ commands are sent), the single
 * access is achieved by setting a blocking flag, which causes the mailbox
 * commands of other competing contexts to fail.
 */
static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
					      u32 cmd)
{
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn, false,
			  "Trying to send a MFW mailbox command [0x%x]"
			  " in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
		return ECORE_BUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
	}

	return ECORE_SUCCESS;
}
static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}
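/* ecore_mcp_mb_lock() and ecore_mcp_mb_unlock() are always used as a pair
 * around a mailbox access. For [UN]LOAD_REQ the spinlock is released early and
 * exclusion relies on the block_mb_sending flag instead, which is cleared
 * again when the corresponding [UN]LOAD_DONE command is sent.
 */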
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the MFW up to 500 msec (50,000 * 10 usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}
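/* ecore_do_mcp_cmd() below implements the actual mailbox handshake: the driver
 * writes (cmd | seq) into drv_mb_header and polls fw_mb_header until the MFW
 * echoes the same sequence number, which guards against treating a stale
 * response as the reply to the current command.
 */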
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 cmd, u32 param,
					     u32 *o_mcp_resp,
					     u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 seq, cnt = 1, actual_mb_seq;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the MFW up to 5 seconds (500,000 * 10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < max_retries));

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}

	return rc;
}
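/* Commands that carry a payload stage it in the union_data area of
 * struct public_drv_mb before the doorbell is rung, and read any reply payload
 * back from the same area; ecore_mcp_cmd_and_union() wraps this copy-in /
 * copy-out around ecore_do_mcp_cmd().
 */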
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;
	enum _ecore_status_t rc;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
		return ECORE_BUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (p_mb_params->p_data_src != OSAL_NULL)
		ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
				p_mb_params->p_data_src,
				sizeof(*p_mb_params->p_data_src));

	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			      p_mb_params->param, &p_mb_params->mcp_resp,
			      &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != OSAL_NULL)
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr,
				  sizeof(*p_mb_params->p_data_dst));

	ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}
405 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
406 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
407 u32 *o_mcp_resp, u32 *o_mcp_param)
409 struct ecore_mcp_mb_params mb_params;
410 enum _ecore_status_t rc;
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
427 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

431 *o_mcp_resp = mb_params.mcp_resp;
432 *o_mcp_param = mb_params.mcp_param;
	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
452 OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
453 mb_params.p_data_src = &union_data;
454 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

458 *o_mcp_resp = mb_params.mcp_resp;
459 *o_mcp_param = mb_params.mcp_param;
	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
479 mb_params.p_data_dst = &union_data;
480 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

484 *o_mcp_resp = mb_params.mcp_resp;
485 *o_mcp_param = mb_params.mcp_param;
487 *o_txn_size = *o_mcp_param;
488 OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
	return ECORE_SUCCESS;
}
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
{
	return (drv_role == DRV_ROLE_OS &&
		exist_drv_role == DRV_ROLE_PREBOOT) ||
	       (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
}
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}
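/* The bitmap built below is reported to the MFW as the second dword of the
 * driver version (in_params.drv_ver_1 in ecore_mcp_load_req()), so the MFW can
 * tell which protocol personalities this ecore build was compiled with.
 */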
543 #define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
544 #define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
545 #define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
546 #define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
547 #define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
548 #define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
549 #define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}
struct ecore_load_req_in_params {
	u32 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};
603 static enum _ecore_status_t
604 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
605 struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	union drv_union_data union_data_src, union_data_dst;
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc *p_load_req;
	struct load_rsp_stc *p_load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;
615 p_load_req = &union_data_src.load_req;
616 OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
617 p_load_req->drv_ver_0 = p_in_params->drv_ver_0;
618 p_load_req->drv_ver_1 = p_in_params->drv_ver_1;
619 p_load_req->fw_ver = p_in_params->fw_ver;
620 ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_ROLE,
621 p_in_params->drv_role);
622 ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_LOCK_TO,
623 p_in_params->timeout_val);
624 ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FORCE,
625 p_in_params->force_cmd);
626 ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FLAGS0,
627 p_in_params->avoid_eng_reset);
629 hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
630 DRV_ID_MCP_HSI_VER_CURRENT :
631 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
633 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
634 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
635 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
636 mb_params.p_data_src = &union_data_src;
637 mb_params.p_data_dst = &union_data_dst;
639 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
640 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
642 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
643 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
644 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
645 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
647 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
648 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
649 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
650 p_load_req->drv_ver_0, p_load_req->drv_ver_1,
651 p_load_req->fw_ver, p_load_req->misc0,
652 ECORE_MFW_GET_FIELD(p_load_req->misc0,
654 ECORE_MFW_GET_FIELD(p_load_req->misc0,
656 ECORE_MFW_GET_FIELD(p_load_req->misc0,
658 ECORE_MFW_GET_FIELD(p_load_req->misc0,
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}
668 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
669 "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
670 p_out_params->load_code = mb_params.mcp_resp;
672 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
673 p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
674 p_load_rsp = &union_data_dst.load_rsp;
675 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
676 "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
677 p_load_rsp->drv_ver_0, p_load_rsp->drv_ver_1,
678 p_load_rsp->fw_ver, p_load_rsp->misc0,
679 ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
681 ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
683 ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
686 p_out_params->exist_drv_ver_0 = p_load_rsp->drv_ver_0;
687 p_out_params->exist_drv_ver_1 = p_load_rsp->drv_ver_1;
688 p_out_params->exist_fw_ver = p_load_rsp->fw_ver;
689 p_out_params->exist_drv_role =
690 ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_ROLE);
691 p_out_params->mfw_hsi_ver =
692 ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
					LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}
	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
						   enum ecore_drv_role drv_role,
						   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static enum _ecore_status_t
ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
			enum ecore_load_req_force force_cmd,
			u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
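/* The MFW answers a LOAD_REQ with one of FW_MSG_CODE_DRV_LOAD_ENGINE, _PORT or
 * _FUNCTION, telling this PF how much common initialization it owns: the first
 * PF on an engine initializes the engine, the first PF on a port initializes
 * the port, and every other PF initializes only its own function.
 */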
750 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
751 struct ecore_ptt *p_ptt,
752 struct ecore_load_req_params *p_params)
754 struct ecore_load_req_out_params out_params;
755 struct ecore_load_req_in_params in_params;
756 u8 mfw_drv_role, mfw_force_cmd;
757 enum _ecore_status_t rc;
760 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
761 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
762 return ECORE_SUCCESS;
766 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
767 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
768 in_params.drv_ver_0 = ECORE_VERSION;
769 in_params.drv_ver_1 = ecore_get_config_bitmap();
770 in_params.fw_ver = STORM_FW_VERSION;
	rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc != ECORE_SUCCESS)
		return rc;

775 in_params.drv_role = mfw_drv_role;
776 in_params.timeout_val = p_params->timeout_val;
	rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
				     &mfw_force_cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

782 in_params.force_cmd = mfw_force_cmd;
783 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
785 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

790 /* First handle cases where another load request should/might be sent:
791 * - MFW expects the old interface [HSI version = 1]
792 * - MFW responds that a force load request is required
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
798 /* The previous load request set the mailbox blocking */
799 p_hwfn->mcp_info->block_mb_sending = false;
801 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
802 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
807 } else if (out_params.load_code ==
808 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
809 /* The previous load request set the mailbox blocking */
810 p_hwfn->mcp_info->block_mb_sending = false;
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role)) {
			DP_INFO(p_hwfn,
				"A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
816 out_params.exist_drv_role,
817 out_params.exist_fw_ver,
818 out_params.exist_drv_ver_0,
819 out_params.exist_drv_ver_1);
			rc = ecore_get_mfw_force_cmd(p_hwfn,
						     ECORE_LOAD_REQ_FORCE_ALL,
						     &mfw_force_cmd);
			if (rc != ECORE_SUCCESS)
				return rc;
827 in_params.force_cmd = mfw_force_cmd;
828 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding a force load to prevent disruption of active PFs.\n",
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}
	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
850 switch (out_params.load_code) {
851 case FW_MSG_CODE_DRV_LOAD_ENGINE:
852 case FW_MSG_CODE_DRV_LOAD_PORT:
853 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
854 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
855 out_params.drv_exists) {
856 /* The role and fw/driver version match, but the PF is
857 * already loaded and has not been unloaded gracefully.
858 * This is unexpected since a quasi-FLR request was
859 * previously sent as part of ecore_hw_prepare().
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
		}
		break;
866 case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
867 case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
868 case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
869 case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
		DP_NOTICE(p_hwfn, false,
			  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}
	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}
886 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
887 struct ecore_ptt *p_ptt)
889 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
891 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
892 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
893 ECORE_PATH_ID(p_hwfn));
894 u32 disabled_vfs[VF_MAX_STATIC / 32];
897 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
898 "Reading Disabled VF information from [offset %08x],"
900 mfw_path_offsize, path_addr);
902 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
903 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
905 OFFSETOF(struct public_path,
908 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
909 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
910 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
913 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
914 OSAL_VF_FLR_UPDATE(p_hwfn);
917 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
918 struct ecore_ptt *p_ptt,
921 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
923 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
924 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
926 struct ecore_mcp_mb_params mb_params;
927 union drv_union_data union_data;
928 enum _ecore_status_t rc;
931 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
932 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
933 "Acking VFs [%08x,...,%08x] - %08x\n",
934 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
936 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
937 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
938 OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
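	/* vfs_to_ack is a bitmap with one bit per VF, i.e. VF_MAX_STATIC bits
	 * or VF_MAX_STATIC / 8 bytes, copied verbatim into the ack_vf_disabled
	 * field of the mailbox union data.
	 */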
939 mb_params.p_data_src = &union_data;
940 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
942 if (rc != ECORE_SUCCESS) {
943 DP_NOTICE(p_hwfn, false,
944 "Failed to pass ACK for VF flr to MFW\n");
945 return ECORE_TIMEOUT;
948 /* TMP - clear the ACK bits; should be done by MFW */
949 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}
958 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
959 struct ecore_ptt *p_ptt)
961 u32 transceiver_state;
963 transceiver_state = ecore_rd(p_hwfn, p_ptt,
964 p_hwfn->mcp_info->port_addr +
965 OFFSETOF(struct public_port,
968 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
969 "Received transceiver state update [0x%08x] from mfw"
971 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
972 OFFSETOF(struct public_port,
975 transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
977 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
978 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
980 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
983 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
984 struct ecore_ptt *p_ptt,
987 struct ecore_mcp_link_state *p_link;
991 p_link = &p_hwfn->mcp_info->link_output;
992 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
994 status = ecore_rd(p_hwfn, p_ptt,
995 p_hwfn->mcp_info->port_addr +
996 OFFSETOF(struct public_port, link_status));
997 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
998 "Received link update [0x%08x] from mfw"
1000 status, (u32)(p_hwfn->mcp_info->port_addr +
1001 OFFSETOF(struct public_port,
1004 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1005 "Resetting link indications\n");
1009 if (p_hwfn->b_drv_link_init)
1010 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1012 p_link->link_up = false;
1014 p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}
	/* We never store the total line speed directly, since p_link->speed
	 * is changed again later according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;
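	/* line_speed keeps the physical link rate; p_link->speed itself may be
	 * scaled down below by __ecore_configure_pf_max_bandwidth() according
	 * to the PF's maximum bandwidth percentage.
	 */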
1052 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1053 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
1062 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
1063 p_link->min_pf_rate);
1065 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1066 p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1067 p_link->parallel_detection = !!(status &
1068 LINK_STATUS_PARALLEL_DETECTION_USED);
1069 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1071 p_link->partner_adv_speed |=
1072 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1073 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1074 p_link->partner_adv_speed |=
1075 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1076 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1077 p_link->partner_adv_speed |=
1078 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1079 ECORE_LINK_PARTNER_SPEED_10G : 0;
1080 p_link->partner_adv_speed |=
1081 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1082 ECORE_LINK_PARTNER_SPEED_20G : 0;
1083 p_link->partner_adv_speed |=
1084 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1085 ECORE_LINK_PARTNER_SPEED_25G : 0;
1086 p_link->partner_adv_speed |=
1087 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1088 ECORE_LINK_PARTNER_SPEED_40G : 0;
1089 p_link->partner_adv_speed |=
1090 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1091 ECORE_LINK_PARTNER_SPEED_50G : 0;
1092 p_link->partner_adv_speed |=
1093 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1094 ECORE_LINK_PARTNER_SPEED_100G : 0;
1096 p_link->partner_tx_flow_ctrl_en =
1097 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1098 p_link->partner_rx_flow_ctrl_en =
1099 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1101 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1102 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1103 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1105 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1106 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1108 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1109 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1112 p_link->partner_adv_pause = 0;
1115 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
	OSAL_LINK_UPDATE(p_hwfn);
}
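/* ecore_mcp_set_link() below builds an eth_phy_cfg in the mailbox union data
 * and sends DRV_MSG_CODE_INIT_PHY or DRV_MSG_CODE_LINK_RESET; the resulting
 * link state is reported back asynchronously via an MFW_DRV_MSG_LINK_CHANGE
 * event, which ends up in ecore_mcp_handle_link_change() above.
 */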
1120 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1121 struct ecore_ptt *p_ptt, bool b_up)
1123 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1124 struct ecore_mcp_mb_params mb_params;
1125 union drv_union_data union_data;
1126 struct eth_phy_cfg *p_phy_cfg;
1127 enum _ecore_status_t rc = ECORE_SUCCESS;
1131 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1132 return ECORE_SUCCESS;
1135 /* Set the shmem configuration according to params */
1136 p_phy_cfg = &union_data.drv_phy_cfg;
1137 OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
1138 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1139 if (!params->speed.autoneg)
1140 p_phy_cfg->speed = params->speed.forced_speed;
1141 p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1142 p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1143 p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1144 p_phy_cfg->adv_speed = params->speed.advertised_speeds;
1145 p_phy_cfg->loopback_mode = params->loopback_mode;
1146 p_hwfn->b_drv_link_init = b_up;
1149 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1150 "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
1151 " adv_speed 0x%08x, loopback 0x%08x\n",
1152 p_phy_cfg->speed, p_phy_cfg->pause,
1153 p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode);
1155 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1157 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1158 mb_params.cmd = cmd;
1159 mb_params.p_data_src = &union_data;
1160 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return rc;
}
1175 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1176 struct ecore_ptt *p_ptt)
1178 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1180 /* TODO - Add support for VFs */
1181 if (IS_VF(p_hwfn->p_dev))
1184 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1186 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1187 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1189 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1191 OFFSETOF(struct public_path, process_kill)) &
1192 PROCESS_KILL_COUNTER_MASK;
	return proc_kill_cnt;
}
1197 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1198 struct ecore_ptt *p_ptt)
1200 struct ecore_dev *p_dev = p_hwfn->p_dev;
1203 /* Prevent possible attentions/interrupts during the recovery handling
1204 * and till its load phase, during which they will be re-enabled.
1206 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1208 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1210 /* The following operations should be done once, and thus in CMT mode
1211 * are carried out by only the first HW function.
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;
1216 if (p_dev->recov_in_prog) {
1217 DP_NOTICE(p_hwfn, false,
1218 "Ignoring the indication since a recovery"
1219 " process is already in progress\n");
1223 p_dev->recov_in_prog = true;
1225 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1226 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1228 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1231 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1232 struct ecore_ptt *p_ptt,
1233 enum MFW_DRV_MSG_TYPE type)
1235 enum ecore_mcp_protocol_type stats_type;
1236 union ecore_mcp_protocol_stats stats;
1237 struct ecore_mcp_mb_params mb_params;
1238 union drv_union_data union_data;
1242 case MFW_DRV_MSG_GET_LAN_STATS:
1243 stats_type = ECORE_MCP_LAN_STATS;
1244 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1247 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
1251 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1253 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1254 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1255 mb_params.param = hsi_param;
1256 OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
1257 mb_params.p_data_src = &union_data;
1258 ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1261 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1262 struct public_func *p_shmem_info)
1264 struct ecore_mcp_function_info *p_info;
1266 p_info = &p_hwfn->mcp_info->func_info;
	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and correct the value to min `1' and max `100'
	 * if the limit isn't in range.
	 */
1274 p_info->bandwidth_min = (p_shmem_info->config &
1275 FUNC_MF_CFG_MIN_BW_MASK) >>
1276 FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}
1284 p_info->bandwidth_max = (p_shmem_info->config &
1285 FUNC_MF_CFG_MAX_BW_MASK) >>
1286 FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
1295 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1296 struct ecore_ptt *p_ptt,
1297 struct public_func *p_data,
1300 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1302 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1303 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1306 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1308 size = OSAL_MIN_T(u32, sizeof(*p_data),
1309 SECTION_SIZE(mfw_path_offsize));
1310 for (i = 0; i < size / sizeof(u32); i++)
1311 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1312 func_addr + (i << 2));
1318 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1320 struct ecore_mcp_function_info *p_info;
1321 struct public_func shmem_info;
1322 u32 resp = 0, param = 0;
1324 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1326 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1328 p_info = &p_hwfn->mcp_info->func_info;
1330 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1332 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1334 /* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}
1339 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1340 struct ecore_ptt *p_ptt)
1342 /* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;
1346 DP_NOTICE(p_hwfn, false,
1347 "Fan failure was detected on the network interface card"
1348 " and it's going to be shut down.\n");
1350 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1353 static enum _ecore_status_t
1354 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1355 u32 mdump_cmd, union drv_union_data *p_data_src,
1356 union drv_union_data *p_data_dst, u32 *p_mcp_resp)
1358 struct ecore_mcp_mb_params mb_params;
1359 enum _ecore_status_t rc;
1361 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1362 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1363 mb_params.param = mdump_cmd;
1364 mb_params.p_data_src = p_data_src;
1365 mb_params.p_data_dst = p_data_dst;
1366 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

1370 *p_mcp_resp = mb_params.mcp_resp;
	if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_NOTICE(p_hwfn, false,
			  "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
			  mdump_cmd);
		rc = ECORE_INVAL;
	}

	return rc;
}
1381 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1382 struct ecore_ptt *p_ptt)
1386 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
1387 OSAL_NULL, OSAL_NULL, &mcp_resp);
1390 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1391 struct ecore_ptt *p_ptt,
1394 union drv_union_data union_data;
1397 OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
1399 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
1400 &union_data, OSAL_NULL, &mcp_resp);
1403 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1404 struct ecore_ptt *p_ptt)
1408 p_hwfn->p_dev->mdump_en = true;
1410 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
1411 OSAL_NULL, OSAL_NULL, &mcp_resp);
1414 static enum _ecore_status_t
1415 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1416 struct mdump_config_stc *p_mdump_config)
1418 union drv_union_data union_data;
1420 enum _ecore_status_t rc;
1422 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
1423 OSAL_NULL, &union_data, &mcp_resp);
1424 if (rc != ECORE_SUCCESS)
	/* A zero response implies that the mdump command is not supported */
	if (!mcp_resp)
		return ECORE_NOTIMPL;
	if (mcp_resp != FW_MSG_CODE_OK) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			  mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
	}
1438 OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
1439 sizeof(*p_mdump_config));
1444 enum _ecore_status_t
1445 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1446 struct ecore_mdump_info *p_mdump_info)
1448 u32 addr, global_offsize, global_addr;
1449 struct mdump_config_stc mdump_config;
1450 enum _ecore_status_t rc;
1452 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1454 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1456 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1457 global_addr = SECTION_ADDR(global_offsize, 0);
1458 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1460 OFFSETOF(struct public_global,
1463 if (p_mdump_info->reason) {
1464 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1465 if (rc != ECORE_SUCCESS)
1468 p_mdump_info->version = mdump_config.version;
1469 p_mdump_info->config = mdump_config.config;
1470 p_mdump_info->epoch = mdump_config.epoc;
1471 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1472 p_mdump_info->valid_logs = mdump_config.valid_logs;
1474 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1475 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1476 p_mdump_info->reason, p_mdump_info->version,
1477 p_mdump_info->config, p_mdump_info->epoch,
1478 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1480 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1481 "MFW mdump info: reason %d\n", p_mdump_info->reason);
	return ECORE_SUCCESS;
}
1487 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1488 struct ecore_ptt *p_ptt)
1492 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
1493 OSAL_NULL, OSAL_NULL, &mcp_resp);
1496 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1497 struct ecore_ptt *p_ptt)
1499 /* In CMT mode - no need for more than a single acknowledgment to the
1500 * MFW, and no more than a single notification to the upper driver.
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;
1505 DP_NOTICE(p_hwfn, false,
1506 "Received a critical error notification from the MFW!\n");
1508 if (p_hwfn->p_dev->mdump_en) {
1509 DP_NOTICE(p_hwfn, false,
1510 "Not acknowledging the notification to allow the MFW crash dump\n");
		p_hwfn->p_dev->mdump_en = false;
		return;
	}
1515 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1516 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1519 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1520 struct ecore_ptt *p_ptt)
1522 struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;
1527 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1529 /* Read Messages from MFW */
1530 ecore_mcp_read_mb(p_hwfn, p_ptt);
1532 /* Compare current messages to old ones */
1533 for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

1539 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1540 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
1544 case MFW_DRV_MSG_LINK_CHANGE:
1545 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1547 case MFW_DRV_MSG_VF_DISABLED:
1548 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1550 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1551 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1552 ECORE_DCBX_REMOTE_LLDP_MIB);
1554 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1555 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1556 ECORE_DCBX_REMOTE_MIB);
1558 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1559 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1560 ECORE_DCBX_OPERATIONAL_MIB);
1562 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1563 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1565 case MFW_DRV_MSG_ERROR_RECOVERY:
1566 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1568 case MFW_DRV_MSG_GET_LAN_STATS:
1569 case MFW_DRV_MSG_GET_FCOE_STATS:
1570 case MFW_DRV_MSG_GET_ISCSI_STATS:
1571 case MFW_DRV_MSG_GET_RDMA_STATS:
1572 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1574 case MFW_DRV_MSG_BW_UPDATE:
1575 ecore_mcp_update_bw(p_hwfn, p_ptt);
1577 case MFW_DRV_MSG_FAILURE_DETECTED:
1578 ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1580 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1581 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1584 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1589 /* ACK everything */
1590 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1591 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
1593 /* MFW expect answer in BE, so we force write in that format */
1594 ecore_wr(p_hwfn, p_ptt,
1595 info->mfw_mb_addr + sizeof(u32) +
1596 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_AGAIN;
	}
1607 /* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
1613 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1614 struct ecore_ptt *p_ptt,
1616 u32 *p_running_bundle_id)
1621 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1622 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1623 return ECORE_SUCCESS;
1627 if (IS_VF(p_hwfn->p_dev)) {
1628 if (p_hwfn->vf_iov_info) {
1629 struct pfvf_acquire_resp_tlv *p_resp;
1631 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1632 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1633 return ECORE_SUCCESS;
1635 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1636 "VF requested MFW version prior to ACQUIRE\n");
1641 global_offsize = ecore_rd(p_hwfn, p_ptt,
1642 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1646 ecore_rd(p_hwfn, p_ptt,
1647 SECTION_ADDR(global_offsize,
1648 0) + OFFSETOF(struct public_global, mfw_ver));
1650 if (p_running_bundle_id != OSAL_NULL) {
1651 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1652 SECTION_ADDR(global_offsize,
1654 OFFSETOF(struct public_global,
1655 running_bundle_id));
1658 return ECORE_SUCCESS;
1661 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1664 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1665 struct ecore_ptt *p_ptt;
1667 /* TODO - Add support for VFs */
1671 if (!ecore_mcp_is_init(p_hwfn)) {
1672 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
1676 *p_media_type = MEDIA_UNSPECIFIED;
1678 p_ptt = ecore_ptt_acquire(p_hwfn);
1682 *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1683 OFFSETOF(struct public_port, media_type));
1685 ecore_ptt_release(p_hwfn, p_ptt);
1687 return ECORE_SUCCESS;
1691 /* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
				 enum ecore_pci_personality *p_proto)
{
	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}
1704 static enum _ecore_status_t
1705 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
1706 struct ecore_ptt *p_ptt,
1707 enum ecore_pci_personality *p_proto)
1709 u32 resp = 0, param = 0;
1710 enum _ecore_status_t rc;
1712 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1713 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
1714 (u32)*p_proto, resp, param);
1715 return ECORE_SUCCESS;
1718 static enum _ecore_status_t
1719 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1720 struct public_func *p_info,
1721 struct ecore_ptt *p_ptt,
1722 enum ecore_pci_personality *p_proto)
1724 enum _ecore_status_t rc = ECORE_SUCCESS;
1726 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1727 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1728 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
1730 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
1739 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1740 struct ecore_ptt *p_ptt)
1742 struct ecore_mcp_function_info *info;
1743 struct public_func shmem_info;
1745 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1746 info = &p_hwfn->mcp_info->func_info;
1748 info->pause_on_host = (shmem_info.config &
1749 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1751 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1753 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1754 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1758 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1760 if (shmem_info.mac_upper || shmem_info.mac_lower) {
1761 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1762 info->mac[1] = (u8)(shmem_info.mac_upper);
1763 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1764 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1765 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1766 info->mac[5] = (u8)(shmem_info.mac_lower);
1768 /* TODO - are there protocols for which there's no MAC? */
1769 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1772 /* TODO - are these calculations true for BE machine? */
1773 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
1774 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
1775 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
1776 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
1778 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1780 info->mtu = (u16)shmem_info.mtu_size;
1785 info->mtu = (u16)shmem_info.mtu_size;
1787 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1788 "Read configuration from shmem: pause_on_host %02x"
1789 " protocol %02x BW [%02x - %02x]"
1790 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
1791 " node %lx ovlan %04x\n",
1792 info->pause_on_host, info->protocol,
1793 info->bandwidth_min, info->bandwidth_max,
1794 info->mac[0], info->mac[1], info->mac[2],
1795 info->mac[3], info->mac[4], info->mac[5],
1796 (unsigned long)info->wwn_port,
1797 (unsigned long)info->wwn_node, info->ovlan);
1799 return ECORE_SUCCESS;
1802 struct ecore_mcp_link_params
1803 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_input;
}
1810 struct ecore_mcp_link_state
1811 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;

1817 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1818 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
		p_hwfn->mcp_info->link_output.link_up = true;
	}

	return &p_hwfn->mcp_info->link_output;
}
1826 struct ecore_mcp_link_capabilities
1827 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}
1834 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1835 struct ecore_ptt *p_ptt)
1837 u32 resp = 0, param = 0;
1838 enum _ecore_status_t rc;
1840 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
1849 const struct ecore_mcp_function_info
1850 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->func_info;
}
1857 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1858 struct ecore_ptt *p_ptt,
1859 struct ecore_mcp_nvm_params *params)
1861 enum _ecore_status_t rc;
1863 switch (params->type) {
1864 case ECORE_MCP_NVM_RD:
1865 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1866 params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
1869 params->nvm_rd.buf_size,
1870 params->nvm_rd.buf);
1873 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1874 params->nvm_common.offset,
				   &params->nvm_common.resp,
				   &params->nvm_common.param);
1878 case ECORE_MCP_NVM_WR:
1879 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1880 params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
1883 params->nvm_wr.buf_size,
1884 params->nvm_wr.buf);
1893 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1894 struct ecore_ptt *p_ptt, u32 personalities)
1896 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1897 struct public_func shmem_info;
1898 int i, count = 0, num_pfs;
1900 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1902 for (i = 0; i < num_pfs; i++) {
1903 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1904 MCP_PF_ID_BY_REL(p_hwfn, i));
		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
					      &protocol) != ECORE_SUCCESS)
			continue;

		if ((1 << ((u32)protocol)) & personalities)
			count++;
	}

	return count;
}
1920 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1921 struct ecore_ptt *p_ptt,
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
		return ECORE_INVAL;
	}

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;
1936 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1937 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1938 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
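	/* MCP_REG_NVM_CFG4 encodes the flash size as a power-of-two number of
	 * Mbit; adding MCP_BYTES_PER_MBIT_SHIFT to that exponent converts the
	 * result to bytes.
	 */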
1939 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1941 *p_flash_size = flash_size;
	return ECORE_SUCCESS;
}
1946 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1947 struct ecore_ptt *p_ptt)
1949 struct ecore_dev *p_dev = p_hwfn->p_dev;
1951 if (p_dev->recov_in_prog) {
1952 DP_NOTICE(p_hwfn, false,
1953 "Avoid triggering a recovery since such a process"
			  " is already in progress\n");
		return ECORE_AGAIN;
	}
1958 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
1959 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1961 return ECORE_SUCCESS;
1964 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1965 struct ecore_ptt *p_ptt,
1968 u32 resp = 0, param = 0, rc_param = 0;
1969 enum _ecore_status_t rc;
1971 /* Only Leader can configure MSIX, and need to take CMT into account */
1973 if (!IS_LEAD_HWFN(p_hwfn))
1974 return ECORE_SUCCESS;
1975 num *= p_hwfn->p_dev->num_hwfns;
1977 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1978 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1979 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1980 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			   &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
			  vf_id);
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}
1998 enum _ecore_status_t
1999 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2000 struct ecore_mcp_drv_version *p_ver)
2002 struct drv_version_stc *p_drv_version;
2003 struct ecore_mcp_mb_params mb_params;
2004 union drv_union_data union_data;
2008 enum _ecore_status_t rc;
2011 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2012 return ECORE_SUCCESS;
2015 p_drv_version = &union_data.drv_version;
2016 p_drv_version->version = p_ver->version;
2017 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2018 for (i = 0; i < num_words; i++) {
2019 /* The driver name is expected to be in a big-endian format */
2020 p_name = &p_ver->name[i * sizeof(u32)];
2021 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2022 *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
2025 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2026 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2027 mb_params.p_data_src = &union_data;
2028 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
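/* ecore_mcp_halt() asks the MFW to halt its CPU via a mailbox command, whereas
 * ecore_mcp_resume() clears the soft-halt state directly through the
 * MCP_REG_CPU_* GRC registers, since a halted MCP can no longer service
 * mailbox commands.
 */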
2035 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2036 struct ecore_ptt *p_ptt)
2038 enum _ecore_status_t rc;
2039 u32 resp = 0, param = 0;
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
2049 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2050 struct ecore_ptt *p_ptt)
2052 u32 value, cpu_mode;
2054 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2056 value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2057 value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2058 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2059 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? ECORE_INVAL :
	    ECORE_SUCCESS;
}
2064 enum _ecore_status_t
2065 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2066 struct ecore_ptt *p_ptt,
2067 enum ecore_ov_client client)
2069 enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (client) {
	case ECORE_OV_CLIENT_DRV:
2075 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2077 case ECORE_OV_CLIENT_USER:
2078 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2080 case ECORE_OV_CLIENT_VENDOR_SPEC:
2081 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2084 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2088 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2089 drv_mb_param, &resp, ¶m);
2090 if (rc != ECORE_SUCCESS)
2091 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
enum _ecore_status_t
ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_ov_driver_state drv_state)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (drv_state) {
	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case ECORE_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case ECORE_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n",
			  drv_state);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}
enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       enum ecore_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	enum _ecore_status_t rc;

	switch (mode) {
	case ECORE_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case ECORE_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case ECORE_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 mask_parities)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			   mask_parities, &resp, &param);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not ack mask parity request. Old MFW?\n");
		rc = ECORE_INVAL;
	}

	return rc;
}
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	bytes_left = len;
	offset = 0;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		params.nvm_common.offset = (addr + offset) |
		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
					    FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
					u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &len;
	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
	    DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
	params.nvm_common.offset = addr;
	params.nvm_rd.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
						  u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
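/* Writes below follow the same chunking scheme as reads. A file update is
 * expected to start with DRV_MSG_CODE_NVM_PUT_FILE_BEGIN (see above) and
 * then push the payload through ECORE_PUT_FILE_DATA writes, which map to
 * DRV_MSG_CODE_NVM_PUT_FILE_DATA.
 */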
/* rc is initialized to ECORE_INVAL because the loop below
 * is not entered when len is 0.
 */
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	u32 buf_idx, buf_size;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_WR;
	if (cmd == ECORE_PUT_FILE_DATA)
		params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
	else
		params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MCP_DRV_NVM_BUF_LEN);
		params.nvm_common.offset = ((buf_size <<
					     DRV_MB_PARAM_NVM_LEN_SHIFT)
					    | addr) + buf_idx;
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS ||
		    ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
		     (params.nvm_common.resp !=
		      FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (buf_idx % 0x1000 >
		    (buf_idx + buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		buf_idx += buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_wr.buf_size = len;
	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
	    DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
	params.nvm_common.offset = addr;
	params.nvm_wr.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
						   u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
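/* Transceiver (SFP) accesses pack the port and I2C slave address into the
 * mailbox offset field once, and then re-pack the byte offset and size for
 * every chunk; each chunk is capped at MAX_I2C_TRANSACTION_SIZE bytes.
 */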
enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	u32 bytes_left, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);

	addr = offset;
	offset = 0;
	bytes_left = len;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((addr + offset) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 port, u32 addr, u32 offset,
					     u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_idx, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MAX_I2C_TRANSACTION_SIZE);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((offset + buf_idx) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		buf_idx += buf_size;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
			   drv_mb_param, &rsp, gpio_val);
	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u16 gpio, u16 gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, param, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
		       (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_direction,
					 u32 *gpio_ctrl)
{
	u32 drv_mb_param = 0, rsp, val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
			   drv_mb_param, &rsp, &val);
	if (rc != ECORE_SUCCESS)
		return rc;

	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
			  DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
		     DRV_MB_PARAM_GPIO_CTRL_SHIFT;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
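/* BIST tests are all driven through DRV_MSG_CODE_BIST_TEST; the test index
 * is placed in the mailbox param via DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT and
 * the pass/fail result is returned through the response and param fields.
 */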
enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, num_images);
	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	struct bist_nvm_image_att *p_image_att, u32 image_index)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
				    DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
	params.nvm_common.offset |= (image_index <<
				     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);

	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
	params.nvm_rd.buf = (u32 *)p_image_att;

	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
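/* Each sensor is reported by the MFW as a packed 32-bit word; the fields
 * are extracted below with the SENSOR_LOCATION, THRESHOLD_HIGH,
 * CRITICAL_TEMPERATURE and CURRENT_TEMP masks and shifts.
 */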
enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc *p_mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mfw_temp_info = &union_data.temp_info;

	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32,
					      p_mfw_temp_info->num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = p_mfw_temp_info->sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_SHIFT;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_SHIFT;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_SHIFT;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_SHIFT;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEM_ZERO(&params, sizeof(params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
	params.nvm_common.offset = 0;
	params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
	params.nvm_rd.buf_size = &buf_size;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
	    FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;
	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}
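/* The resource allocation version advertised to the MFW packs the driver's
 * major/minor numbers into the mailbox param of
 * DRV_MSG_GET_RESOURCE_ALLOC_MSG.
 */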
#define ECORE_RESC_ALLOC_VERSION_MAJOR 1
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION				\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     struct resource_info *p_resc_info,
					     u32 *p_mcp_resp, u32 *p_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	OSAL_MEMCPY(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
	mb_params.p_data_src = &union_data;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = mb_params.mcp_resp;
	*p_mcp_param = mb_params.mcp_param;

	OSAL_MEMCPY(p_resc_info, &union_data.resource, sizeof(*p_resc_info));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x,"
		   " offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
		   *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
		   p_resc_info->offset, p_resc_info->vf_size,
		   p_resc_info->vf_offset, p_resc_info->flags);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			     &mcp_resp, &mcp_param);
}
static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 param, u32 *p_mcp_resp,
						   u32 *p_mcp_param)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			   p_mcp_resp, p_mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* A zero response implies that the resource command is not supported */
	if (!*p_mcp_resp)
		return ECORE_NOTIMPL;

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn, false,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
	}

	return rc;
}
enum _ecore_status_t ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 resource_num, u8 timeout,
					 bool *p_granted, u8 *p_owner)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	switch (timeout) {
	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		timeout = 0;
		break;
	case ECORE_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, resource_num);
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, timeout);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resc_num %d]\n",
		   param, timeout, opcode, resource_num);

	/* Attempt to acquire the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	*p_owner = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, *p_owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		*p_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		*p_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   u8 resource_num, bool force,
					   bool *p_released)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	opcode = force ? RESOURCE_OPCODE_FORCE_RELEASE
		       : RESOURCE_OPCODE_RELEASE;
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, resource_num);
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resc_num %d]\n",
		   param, opcode, resource_num);

	/* Attempt to release the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [resc_num %d]\n",
			resource_num);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		*p_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		*p_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}