2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
24 #define CHIP_MCP_RESP_ITER_US 10
25 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
27 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
28 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
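/* With the default 10usec iteration delay these work out to
 * 500 * 1000 * 10usec = 5 sec for mailbox commands and
 * 50 * 1000 * 10usec = 500 msec for an MCP reset.
 */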
30 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
31 ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
34 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
35 ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
37 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
38 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
39 OFFSETOF(struct public_drv_mb, _field), _val)
41 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
42 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
43 OFFSETOF(struct public_drv_mb, _field))
45 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
46 DRV_ID_PDA_COMP_VER_SHIFT)
48 #define MCP_BYTES_PER_MBIT_SHIFT 17
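/* 1 Mbit = 2^20 bits = 2^17 bytes, hence the shift of 17 used when
 * converting the flash-size exponent from MCP_REG_NVM_CFG4 into a byte
 * count (see ecore_mcp_get_flash_size() below).
 */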
52 static int loaded_port[MAX_NUM_PORTS] = { 0 };
55 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
57 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
62 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
64 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
66 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
68 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
70 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
71 "port_addr = 0x%x, port_id 0x%02x\n",
72 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
75 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
77 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
82 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
86 if (!p_hwfn->mcp_info->public_base)
89 for (i = 0; i < length; i++) {
90 tmp = ecore_rd(p_hwfn, p_ptt,
91 p_hwfn->mcp_info->mfw_mb_addr +
92 (i << 2) + sizeof(u32));
94 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
95 OSAL_BE32_TO_CPU(tmp);
99 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
101 if (p_hwfn->mcp_info) {
102 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
103 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
104 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
106 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
107 p_hwfn->mcp_info = OSAL_NULL;
109 return ECORE_SUCCESS;
112 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
113 struct ecore_ptt *p_ptt)
115 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
116 u32 drv_mb_offsize, mfw_mb_offsize;
117 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
120 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
121 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
122 p_info->public_base = 0;
127 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
128 if (!p_info->public_base)
131 p_info->public_base |= GRCBASE_MCP;
133 /* Calculate the driver and MFW mailbox address */
134 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
135 SECTION_OFFSIZE_ADDR(p_info->public_base,
137 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
138 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
139 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
140 " mcp_pf_id = 0x%x\n",
141 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
143 /* Set the MFW MB address */
144 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
145 SECTION_OFFSIZE_ADDR(p_info->public_base,
147 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
148 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
149 p_info->mfw_mb_addr);
151 /* Get the current driver mailbox sequence before sending
154 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
155 DRV_MSG_SEQ_NUMBER_MASK;
157 /* Get current FW pulse sequence */
158 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
161 p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
162 MISCS_REG_GENERIC_POR_0);
164 return ECORE_SUCCESS;
167 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
168 struct ecore_ptt *p_ptt)
170 struct ecore_mcp_info *p_info;
173 /* Allocate mcp_info structure */
174 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
175 sizeof(*p_hwfn->mcp_info));
176 if (!p_hwfn->mcp_info)
178 p_info = p_hwfn->mcp_info;
180 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
181 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
182 /* Do not free mcp_info here, since public_base indicates that
183 * the MCP is not initialized
185 return ECORE_SUCCESS;
188 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
189 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
190 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
191 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_cur)
194 /* Initialize the MFW spinlock */
195 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
196 OSAL_SPIN_LOCK_INIT(&p_info->lock);
198 return ECORE_SUCCESS;
201 DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
202 ecore_mcp_free(p_hwfn);
206 /* Locks the MFW mailbox of a PF to ensure a single access.
207 * The lock is achieved in most cases by holding a spinlock, causing other
208 * threads to wait until a previous access is done.
209 * In some cases (currently when a [UN]LOAD_REQ command is sent), the single
210 * access is achieved by setting a blocking flag, which causes mailbox
211 * commands from other competing contexts to fail.
213 static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
216 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
218 /* The spinlock shouldn't be acquired when the mailbox command is
219 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
220 * pending [UN]LOAD_REQ command of another PF together with a spinlock
221 * (i.e. while interrupts are disabled) can lead to a deadlock.
222 * It is assumed that for a single PF, no other mailbox commands can be
223 * sent from another context while sending LOAD_REQ, and that any
224 * parallel commands to UNLOAD_REQ can be cancelled.
226 if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
227 p_hwfn->mcp_info->block_mb_sending = false;
229 if (p_hwfn->mcp_info->block_mb_sending) {
230 DP_NOTICE(p_hwfn, false,
231 "Trying to send a MFW mailbox command [0x%x]"
232 " in parallel to [UN]LOAD_REQ. Aborting.\n",
234 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
238 if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
239 p_hwfn->mcp_info->block_mb_sending = true;
240 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
243 return ECORE_SUCCESS;
246 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
248 if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
249 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
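/* Illustrative sketch only (not part of the driver): the expected
 * pairing of the lock/unlock helpers around a mailbox access, with
 * error handling elided and `cmd' standing for any DRV_MSG_CODE value:
 *
 *	rc = ecore_mcp_mb_lock(p_hwfn, cmd);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	... write the command to the driver mailbox and poll for the
 *	    MFW response ...
 *	ecore_mcp_mb_unlock(p_hwfn, cmd);
 */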
252 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
253 struct ecore_ptt *p_ptt)
255 u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
256 u32 delay = CHIP_MCP_RESP_ITER_US;
257 u32 org_mcp_reset_seq, cnt = 0;
258 enum _ecore_status_t rc = ECORE_SUCCESS;
261 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
262 delay = EMUL_MCP_RESP_ITER_US;
265 /* Ensure that only a single thread is accessing the mailbox at a
268 rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
269 if (rc != ECORE_SUCCESS)
272 /* Set drv command along with the updated sequence */
273 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
274 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
277 /* Wait for MFW response */
279 /* Give the FW up to 500 msec (50*1000*10usec) */
280 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
281 MISCS_REG_GENERIC_POR_0)) &&
282 (cnt++ < ECORE_MCP_RESET_RETRIES));
284 if (org_mcp_reset_seq !=
285 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
286 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
287 "MCP was reset after %d usec\n", cnt * delay);
289 DP_ERR(p_hwfn, "Failed to reset MCP\n");
293 ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
298 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
299 struct ecore_ptt *p_ptt,
304 u32 delay = CHIP_MCP_RESP_ITER_US;
305 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
306 u32 seq, cnt = 1, actual_mb_seq;
307 enum _ecore_status_t rc = ECORE_SUCCESS;
310 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
311 delay = EMUL_MCP_RESP_ITER_US;
312 /* There is a built-in delay of 100usec in each MFW response read */
313 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
317 /* Get actual driver mailbox sequence */
318 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
319 DRV_MSG_SEQ_NUMBER_MASK;
321 /* Use the MCP history register to check if an MCP reset occurred
322 * between init time and now; the register changes on every MCP reset,
323 * so a mismatch with the cached mcp_hist value forces a re-read. */
324 if (p_hwfn->mcp_info->mcp_hist !=
325 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
326 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
327 ecore_load_mcp_offsets(p_hwfn, p_ptt);
328 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
330 seq = ++p_hwfn->mcp_info->drv_mb_seq;
333 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
335 /* Set drv command along with the updated sequence */
336 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
339 /* Wait for MFW response */
341 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
343 /* Give the FW up to 5 seconds (500*1000*10usec) */
344 } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
345 (cnt++ < max_retries));
347 /* Is this a reply to our command? */
348 if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
349 *o_mcp_resp &= FW_MSG_CODE_MASK;
350 /* Get the MCP param */
351 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
354 DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
358 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
363 static enum _ecore_status_t
364 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
365 struct ecore_ptt *p_ptt,
366 struct ecore_mcp_mb_params *p_mb_params)
369 enum _ecore_status_t rc;
371 /* MCP not initialized */
372 if (!ecore_mcp_is_init(p_hwfn)) {
373 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
377 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
378 OFFSETOF(struct public_drv_mb, union_data);
380 /* Ensure that only a single thread is accessing the mailbox at a
383 rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
384 if (rc != ECORE_SUCCESS)
387 if (p_mb_params->p_data_src != OSAL_NULL)
388 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
389 p_mb_params->p_data_src,
390 sizeof(*p_mb_params->p_data_src));
392 rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
393 p_mb_params->param, &p_mb_params->mcp_resp,
394 &p_mb_params->mcp_param);
396 if (p_mb_params->p_data_dst != OSAL_NULL)
397 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
399 sizeof(*p_mb_params->p_data_dst));
401 ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
406 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
407 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
408 u32 *o_mcp_resp, u32 *o_mcp_param)
410 struct ecore_mcp_mb_params mb_params;
411 enum _ecore_status_t rc;
414 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
415 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
417 loaded_port[p_hwfn->port_id]--;
418 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
421 return ECORE_SUCCESS;
425 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
427 mb_params.param = param;
428 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
429 if (rc != ECORE_SUCCESS)
432 *o_mcp_resp = mb_params.mcp_resp;
433 *o_mcp_param = mb_params.mcp_param;
435 return ECORE_SUCCESS;
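/* Illustrative sketch only: a typical ecore_mcp_cmd() invocation,
 * mirroring the NIG drain request issued in ecore_mcp_drain() below:
 *
 *	u32 resp = 0, param = 0;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 1000,
 *			   &resp, &param);
 */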
438 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
439 struct ecore_ptt *p_ptt,
444 u32 i_txn_size, u32 *i_buf)
446 struct ecore_mcp_mb_params mb_params;
447 union drv_union_data union_data;
448 enum _ecore_status_t rc;
450 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
452 mb_params.param = param;
453 OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
454 mb_params.p_data_src = &union_data;
455 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
456 if (rc != ECORE_SUCCESS)
459 *o_mcp_resp = mb_params.mcp_resp;
460 *o_mcp_param = mb_params.mcp_param;
462 return ECORE_SUCCESS;
465 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
466 struct ecore_ptt *p_ptt,
471 u32 *o_txn_size, u32 *o_buf)
473 struct ecore_mcp_mb_params mb_params;
474 union drv_union_data union_data;
475 enum _ecore_status_t rc;
477 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
479 mb_params.param = param;
480 mb_params.p_data_dst = &union_data;
481 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
482 if (rc != ECORE_SUCCESS)
485 *o_mcp_resp = mb_params.mcp_resp;
486 *o_mcp_param = mb_params.mcp_param;
488 *o_txn_size = *o_mcp_param;
489 OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
491 return ECORE_SUCCESS;
495 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
498 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
501 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
502 else if (!loaded_port[p_hwfn->port_id])
503 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
505 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
507 /* On CMT, always indicate that the load phase is ENGINE */
508 if (p_hwfn->p_dev->num_hwfns > 1)
509 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
511 *p_load_code = load_phase;
513 loaded_port[p_hwfn->port_id]++;
515 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
516 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
517 *p_load_code, loaded, p_hwfn->port_id,
518 loaded_port[p_hwfn->port_id]);
522 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
523 struct ecore_ptt *p_ptt,
526 struct ecore_dev *p_dev = p_hwfn->p_dev;
527 struct ecore_mcp_mb_params mb_params;
528 union drv_union_data union_data;
529 enum _ecore_status_t rc;
532 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
533 ecore_mcp_mf_workaround(p_hwfn, p_load_code);
534 return ECORE_SUCCESS;
538 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
539 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
540 mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
542 OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
543 mb_params.p_data_src = &union_data;
544 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
546 /* if mcp fails to respond we must abort */
547 if (rc != ECORE_SUCCESS) {
548 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
552 *p_load_code = mb_params.mcp_resp;
554 /* If MFW refused (e.g. other port is in diagnostic mode) we
555 * must abort. This can happen in the following cases:
556 * - Other port is in diagnostic mode
557 * - Previously loaded function on the engine is not compliant with
559 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
562 if (!(*p_load_code) ||
563 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
564 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
565 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
566 DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
570 return ECORE_SUCCESS;
573 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
574 struct ecore_ptt *p_ptt)
576 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
578 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
579 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
580 ECORE_PATH_ID(p_hwfn));
581 u32 disabled_vfs[VF_MAX_STATIC / 32];
584 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
585 "Reading Disabled VF information from [offset %08x],"
587 mfw_path_offsize, path_addr);
589 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
590 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
592 OFFSETOF(struct public_path,
595 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
596 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
597 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
600 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
601 OSAL_VF_FLR_UPDATE(p_hwfn);
604 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
605 struct ecore_ptt *p_ptt,
608 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
610 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
611 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
613 struct ecore_mcp_mb_params mb_params;
614 union drv_union_data union_data;
615 enum _ecore_status_t rc;
618 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
619 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
620 "Acking VFs [%08x,...,%08x] - %08x\n",
621 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
623 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
624 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
625 OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
626 mb_params.p_data_src = &union_data;
627 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
629 if (rc != ECORE_SUCCESS) {
630 DP_NOTICE(p_hwfn, false,
631 "Failed to pass ACK for VF flr to MFW\n");
632 return ECORE_TIMEOUT;
635 /* TMP - clear the ACK bits; should be done by MFW */
636 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
637 ecore_wr(p_hwfn, p_ptt,
639 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
645 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
646 struct ecore_ptt *p_ptt)
648 u32 transceiver_state;
650 transceiver_state = ecore_rd(p_hwfn, p_ptt,
651 p_hwfn->mcp_info->port_addr +
652 OFFSETOF(struct public_port,
655 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
656 "Received transceiver state update [0x%08x] from mfw"
658 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
659 OFFSETOF(struct public_port,
662 transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
664 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
665 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
667 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
670 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
671 struct ecore_ptt *p_ptt,
674 struct ecore_mcp_link_state *p_link;
678 p_link = &p_hwfn->mcp_info->link_output;
679 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
681 status = ecore_rd(p_hwfn, p_ptt,
682 p_hwfn->mcp_info->port_addr +
683 OFFSETOF(struct public_port, link_status));
684 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
685 "Received link update [0x%08x] from mfw"
687 status, (u32)(p_hwfn->mcp_info->port_addr +
688 OFFSETOF(struct public_port,
691 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
692 "Resetting link indications\n");
696 if (p_hwfn->b_drv_link_init)
697 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
699 p_link->link_up = false;
701 p_link->full_duplex = true;
702 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
703 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
704 p_link->speed = 100000;
706 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
707 p_link->speed = 50000;
709 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
710 p_link->speed = 40000;
712 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
713 p_link->speed = 25000;
715 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
716 p_link->speed = 20000;
718 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
719 p_link->speed = 10000;
721 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
722 p_link->full_duplex = false;
724 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
725 p_link->speed = 1000;
731 /* We never store total line speed in p_link->speed, since it is
732 * later adjusted according to bandwidth allocation.
733 */
734 if (p_link->link_up && p_link->speed)
735 p_link->line_speed = p_link->speed;
737 p_link->line_speed = 0;
739 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
740 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
742 /* Max bandwidth configuration */
743 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
746 /* Min bandwidth configuration */
747 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
749 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
750 p_link->min_pf_rate);
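/* Illustrative example (values assumed): if the max-BW callback scales
 * the speed proportionally, a 25G link with bandwidth_max of 40 keeps
 * line_speed at 25000 while p_link->speed drops to 10000 (40% of 25G).
 */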
752 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
753 p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
754 p_link->parallel_detection = !!(status &
755 LINK_STATUS_PARALLEL_DETECTION_USED);
756 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
758 p_link->partner_adv_speed |=
759 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
760 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
761 p_link->partner_adv_speed |=
762 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
763 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
764 p_link->partner_adv_speed |=
765 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
766 ECORE_LINK_PARTNER_SPEED_10G : 0;
767 p_link->partner_adv_speed |=
768 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
769 ECORE_LINK_PARTNER_SPEED_20G : 0;
770 p_link->partner_adv_speed |=
771 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
772 ECORE_LINK_PARTNER_SPEED_25G : 0;
773 p_link->partner_adv_speed |=
774 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
775 ECORE_LINK_PARTNER_SPEED_40G : 0;
776 p_link->partner_adv_speed |=
777 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
778 ECORE_LINK_PARTNER_SPEED_50G : 0;
779 p_link->partner_adv_speed |=
780 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
781 ECORE_LINK_PARTNER_SPEED_100G : 0;
783 p_link->partner_tx_flow_ctrl_en =
784 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
785 p_link->partner_rx_flow_ctrl_en =
786 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
788 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
789 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
790 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
792 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
793 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
795 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
796 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
799 p_link->partner_adv_pause = 0;
802 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
804 OSAL_LINK_UPDATE(p_hwfn);
807 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
808 struct ecore_ptt *p_ptt, bool b_up)
810 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
811 struct ecore_mcp_mb_params mb_params;
812 union drv_union_data union_data;
813 struct eth_phy_cfg *p_phy_cfg;
814 enum _ecore_status_t rc = ECORE_SUCCESS;
818 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
819 return ECORE_SUCCESS;
822 /* Set the shmem configuration according to params */
823 p_phy_cfg = &union_data.drv_phy_cfg;
824 OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
825 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
826 if (!params->speed.autoneg)
827 p_phy_cfg->speed = params->speed.forced_speed;
828 p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
829 p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
830 p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
831 p_phy_cfg->adv_speed = params->speed.advertised_speeds;
832 p_phy_cfg->loopback_mode = params->loopback_mode;
833 p_hwfn->b_drv_link_init = b_up;
836 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
837 "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
838 " adv_speed 0x%08x, loopback 0x%08x\n",
839 p_phy_cfg->speed, p_phy_cfg->pause,
840 p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode);
842 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
844 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
846 mb_params.p_data_src = &union_data;
847 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
849 /* if mcp fails to respond we must abort */
850 if (rc != ECORE_SUCCESS) {
851 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
855 /* Reset the link status if needed */
857 ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
862 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
863 struct ecore_ptt *p_ptt)
865 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
867 /* TODO - Add support for VFs */
868 if (IS_VF(p_hwfn->p_dev))
871 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
873 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
874 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
876 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
878 OFFSETOF(struct public_path, process_kill)) &
879 PROCESS_KILL_COUNTER_MASK;
881 return proc_kill_cnt;
884 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
885 struct ecore_ptt *p_ptt)
887 struct ecore_dev *p_dev = p_hwfn->p_dev;
890 /* Prevent possible attentions/interrupts during the recovery handling
891 * and until its load phase, during which they will be re-enabled.
893 ecore_int_igu_disable_int(p_hwfn, p_ptt);
895 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
897 /* The following operations should be done once, and thus in CMT mode
898 * are carried out by only the first HW function.
900 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
903 if (p_dev->recov_in_prog) {
904 DP_NOTICE(p_hwfn, false,
905 "Ignoring the indication since a recovery"
906 " process is already in progress\n");
910 p_dev->recov_in_prog = true;
912 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
913 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
915 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
918 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
919 struct ecore_ptt *p_ptt,
920 enum MFW_DRV_MSG_TYPE type)
922 enum ecore_mcp_protocol_type stats_type;
923 union ecore_mcp_protocol_stats stats;
924 struct ecore_mcp_mb_params mb_params;
925 union drv_union_data union_data;
929 case MFW_DRV_MSG_GET_LAN_STATS:
930 stats_type = ECORE_MCP_LAN_STATS;
931 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
934 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
938 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
940 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
941 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
942 mb_params.param = hsi_param;
943 OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
944 mb_params.p_data_src = &union_data;
945 ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
948 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
949 struct public_func *p_shmem_info)
951 struct ecore_mcp_function_info *p_info;
953 p_info = &p_hwfn->mcp_info->func_info;
955 /* TODO - bandwidth min/max should have valid values of 1-100,
956 * as well as some indication that the feature is disabled.
957 * Until MFW/qlediag enforce those limitations, assume there is always a
958 * limit, and correct the value to min `1' and max `100' if it isn't in
961 p_info->bandwidth_min = (p_shmem_info->config &
962 FUNC_MF_CFG_MIN_BW_MASK) >>
963 FUNC_MF_CFG_MIN_BW_SHIFT;
964 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
966 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
967 p_info->bandwidth_min);
968 p_info->bandwidth_min = 1;
971 p_info->bandwidth_max = (p_shmem_info->config &
972 FUNC_MF_CFG_MAX_BW_MASK) >>
973 FUNC_MF_CFG_MAX_BW_SHIFT;
974 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
976 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
977 p_info->bandwidth_max);
978 p_info->bandwidth_max = 100;
982 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
983 struct ecore_ptt *p_ptt,
984 struct public_func *p_data,
987 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
989 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
990 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
993 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
995 size = OSAL_MIN_T(u32, sizeof(*p_data),
996 SECTION_SIZE(mfw_path_offsize));
997 for (i = 0; i < size / sizeof(u32); i++)
998 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
999 func_addr + (i << 2));
1005 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1007 struct ecore_mcp_function_info *p_info;
1008 struct public_func shmem_info;
1009 u32 resp = 0, param = 0;
1011 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1013 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1015 p_info = &p_hwfn->mcp_info->func_info;
1017 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1019 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1021 /* Acknowledge the MFW */
1022 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1026 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1027 struct ecore_ptt *p_ptt)
1029 /* A single notification should be sent to upper driver in CMT mode */
1030 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1033 DP_NOTICE(p_hwfn, false,
1034 "Fan failure was detected on the network interface card"
1035 " and it's going to be shut down.\n");
1037 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1040 static enum _ecore_status_t
1041 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1042 u32 mdump_cmd, union drv_union_data *p_data_src,
1043 union drv_union_data *p_data_dst, u32 *p_mcp_resp)
1045 struct ecore_mcp_mb_params mb_params;
1046 enum _ecore_status_t rc;
1048 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1049 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1050 mb_params.param = mdump_cmd;
1051 mb_params.p_data_src = p_data_src;
1052 mb_params.p_data_dst = p_data_dst;
1053 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1054 if (rc != ECORE_SUCCESS)
1057 *p_mcp_resp = mb_params.mcp_resp;
1058 if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1059 DP_NOTICE(p_hwfn, false,
1060 "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
1068 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1069 struct ecore_ptt *p_ptt)
1073 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
1074 OSAL_NULL, OSAL_NULL, &mcp_resp);
1077 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1078 struct ecore_ptt *p_ptt,
1081 union drv_union_data union_data;
1084 OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
1086 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
1087 &union_data, OSAL_NULL, &mcp_resp);
1090 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1091 struct ecore_ptt *p_ptt)
1095 p_hwfn->p_dev->mdump_en = true;
1097 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
1098 OSAL_NULL, OSAL_NULL, &mcp_resp);
1101 static enum _ecore_status_t
1102 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1103 struct mdump_config_stc *p_mdump_config)
1105 union drv_union_data union_data;
1107 enum _ecore_status_t rc;
1109 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
1110 OSAL_NULL, &union_data, &mcp_resp);
1111 if (rc != ECORE_SUCCESS)
1114 /* A zero response implies that the mdump command is not supported */
1116 return ECORE_NOTIMPL;
1118 if (mcp_resp != FW_MSG_CODE_OK) {
1119 DP_NOTICE(p_hwfn, false,
1120 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1122 rc = ECORE_UNKNOWN_ERROR;
1125 OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
1126 sizeof(*p_mdump_config));
1131 enum _ecore_status_t
1132 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1133 struct ecore_mdump_info *p_mdump_info)
1135 u32 addr, global_offsize, global_addr;
1136 struct mdump_config_stc mdump_config;
1137 enum _ecore_status_t rc;
1139 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1141 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1143 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1144 global_addr = SECTION_ADDR(global_offsize, 0);
1145 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1147 OFFSETOF(struct public_global,
1150 if (p_mdump_info->reason) {
1151 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1152 if (rc != ECORE_SUCCESS)
1155 p_mdump_info->version = mdump_config.version;
1156 p_mdump_info->config = mdump_config.config;
1157 p_mdump_info->epoch = mdump_config.epoc;
1158 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1159 p_mdump_info->valid_logs = mdump_config.valid_logs;
1161 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1162 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1163 p_mdump_info->reason, p_mdump_info->version,
1164 p_mdump_info->config, p_mdump_info->epoch,
1165 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1167 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1168 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1171 return ECORE_SUCCESS;
1174 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1175 struct ecore_ptt *p_ptt)
1179 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
1180 OSAL_NULL, OSAL_NULL, &mcp_resp);
1183 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1184 struct ecore_ptt *p_ptt)
1186 /* In CMT mode - no need for more than a single acknowledgment to the
1187 * MFW, and no more than a single notification to the upper driver.
1189 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1192 DP_NOTICE(p_hwfn, false,
1193 "Received a critical error notification from the MFW!\n");
1195 if (p_hwfn->p_dev->mdump_en) {
1196 DP_NOTICE(p_hwfn, false,
1197 "Not acknowledging the notification to allow the MFW crash dump\n");
1198 p_hwfn->p_dev->mdump_en = false;
1202 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1203 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1206 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1207 struct ecore_ptt *p_ptt)
1209 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1210 enum _ecore_status_t rc = ECORE_SUCCESS;
1214 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1216 /* Read Messages from MFW */
1217 ecore_mcp_read_mb(p_hwfn, p_ptt);
1219 /* Compare current messages to old ones */
1220 for (i = 0; i < info->mfw_mb_length; i++) {
1221 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1226 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1227 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1228 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1231 case MFW_DRV_MSG_LINK_CHANGE:
1232 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1234 case MFW_DRV_MSG_VF_DISABLED:
1235 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1237 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1238 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1239 ECORE_DCBX_REMOTE_LLDP_MIB);
1241 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1242 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1243 ECORE_DCBX_REMOTE_MIB);
1245 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1246 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1247 ECORE_DCBX_OPERATIONAL_MIB);
1249 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1250 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1252 case MFW_DRV_MSG_ERROR_RECOVERY:
1253 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1255 case MFW_DRV_MSG_GET_LAN_STATS:
1256 case MFW_DRV_MSG_GET_FCOE_STATS:
1257 case MFW_DRV_MSG_GET_ISCSI_STATS:
1258 case MFW_DRV_MSG_GET_RDMA_STATS:
1259 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1261 case MFW_DRV_MSG_BW_UPDATE:
1262 ecore_mcp_update_bw(p_hwfn, p_ptt);
1264 case MFW_DRV_MSG_FAILURE_DETECTED:
1265 ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1267 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1268 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1272 DP_NOTICE(p_hwfn, false,
1273 "Unimplemented MFW message %d\n", i);
1278 /* ACK everything */
1279 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1280 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
282 /* MFW expects the answer in BE, so we force the write in that format */
1283 ecore_wr(p_hwfn, p_ptt,
1284 info->mfw_mb_addr + sizeof(u32) +
1285 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1286 sizeof(u32) + i * sizeof(u32), val);
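/* As implied by the reads in ecore_mcp_read_mb() and the write above,
 * the MFW mailbox shmem layout is:
 *	u32 length;		at mfw_mb_addr
 *	u32 messages[n];	at mfw_mb_addr + sizeof(u32)
 *	u32 acks[n];		at mfw_mb_addr + (n + 1) * sizeof(u32)
 * where n = MFW_DRV_MSG_MAX_DWORDS(mfw_mb_length), hence the ACK
 * address computed above.
 */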
1290 DP_NOTICE(p_hwfn, false,
1291 "Received an MFW message indication but no"
1296 /* Copy the new mfw messages into the shadow */
1297 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1302 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1303 struct ecore_ptt *p_ptt,
1305 u32 *p_running_bundle_id)
1310 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1311 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1312 return ECORE_SUCCESS;
1316 if (IS_VF(p_hwfn->p_dev)) {
1317 if (p_hwfn->vf_iov_info) {
1318 struct pfvf_acquire_resp_tlv *p_resp;
1320 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1321 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1322 return ECORE_SUCCESS;
1324 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1325 "VF requested MFW version prior to ACQUIRE\n");
1330 global_offsize = ecore_rd(p_hwfn, p_ptt,
1331 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1335 ecore_rd(p_hwfn, p_ptt,
1336 SECTION_ADDR(global_offsize,
1337 0) + OFFSETOF(struct public_global, mfw_ver));
1339 if (p_running_bundle_id != OSAL_NULL) {
1340 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1341 SECTION_ADDR(global_offsize,
1343 OFFSETOF(struct public_global,
1344 running_bundle_id));
1347 return ECORE_SUCCESS;
1350 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1353 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1354 struct ecore_ptt *p_ptt;
1356 /* TODO - Add support for VFs */
1360 if (!ecore_mcp_is_init(p_hwfn)) {
1361 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
1365 *p_media_type = MEDIA_UNSPECIFIED;
1367 p_ptt = ecore_ptt_acquire(p_hwfn);
1371 *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1372 OFFSETOF(struct public_port, media_type));
1374 ecore_ptt_release(p_hwfn, p_ptt);
1376 return ECORE_SUCCESS;
1379 static enum _ecore_status_t
1380 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1381 struct public_func *p_info,
1382 enum ecore_pci_personality *p_proto)
1384 enum _ecore_status_t rc = ECORE_SUCCESS;
1386 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1387 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1388 *p_proto = ECORE_PCI_ETH;
1397 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1398 struct ecore_ptt *p_ptt)
1400 struct ecore_mcp_function_info *info;
1401 struct public_func shmem_info;
1403 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1404 info = &p_hwfn->mcp_info->func_info;
1406 info->pause_on_host = (shmem_info.config &
1407 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1409 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
1410 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1411 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1415 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1417 if (shmem_info.mac_upper || shmem_info.mac_lower) {
1418 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1419 info->mac[1] = (u8)(shmem_info.mac_upper);
1420 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1421 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1422 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1423 info->mac[5] = (u8)(shmem_info.mac_lower);
1425 /* TODO - are there protocols for which there's no MAC? */
1426 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1429 /* TODO - are these calculations true for BE machine? */
1430 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
1431 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
1432 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
1433 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
1435 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1437 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1438 "Read configuration from shmem: pause_on_host %02x"
1439 " protocol %02x BW [%02x - %02x]"
1440 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
1441 " node %lx ovlan %04x\n",
1442 info->pause_on_host, info->protocol,
1443 info->bandwidth_min, info->bandwidth_max,
1444 info->mac[0], info->mac[1], info->mac[2],
1445 info->mac[3], info->mac[4], info->mac[5],
1446 (unsigned long)info->wwn_port,
1447 (unsigned long)info->wwn_node, info->ovlan);
1449 return ECORE_SUCCESS;
1452 struct ecore_mcp_link_params
1453 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1455 if (!p_hwfn || !p_hwfn->mcp_info)
1457 return &p_hwfn->mcp_info->link_input;
1460 struct ecore_mcp_link_state
1461 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1463 if (!p_hwfn || !p_hwfn->mcp_info)
1467 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1468 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1469 p_hwfn->mcp_info->link_output.link_up = true;
1473 return &p_hwfn->mcp_info->link_output;
1476 struct ecore_mcp_link_capabilities
1477 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1479 if (!p_hwfn || !p_hwfn->mcp_info)
1481 return &p_hwfn->mcp_info->link_capabilities;
1484 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1485 struct ecore_ptt *p_ptt)
1487 u32 resp = 0, param = 0;
1488 enum _ecore_status_t rc;
1490 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
1491 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, ¶m);
1493 /* Wait for the drain to complete before returning */
1499 const struct ecore_mcp_function_info
1500 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1502 if (!p_hwfn || !p_hwfn->mcp_info)
1504 return &p_hwfn->mcp_info->func_info;
1507 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1508 struct ecore_ptt *p_ptt,
1509 struct ecore_mcp_nvm_params *params)
1511 enum _ecore_status_t rc;
1513 switch (params->type) {
1514 case ECORE_MCP_NVM_RD:
1515 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1516 params->nvm_common.offset,
1517 ¶ms->nvm_common.resp,
1518 ¶ms->nvm_common.param,
1519 params->nvm_rd.buf_size,
1520 params->nvm_rd.buf);
1523 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1524 params->nvm_common.offset,
1525 ¶ms->nvm_common.resp,
1526 ¶ms->nvm_common.param);
1528 case ECORE_MCP_NVM_WR:
1529 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1530 params->nvm_common.offset,
1531 ¶ms->nvm_common.resp,
1532 ¶ms->nvm_common.param,
1533 params->nvm_wr.buf_size,
1534 params->nvm_wr.buf);
1543 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1544 struct ecore_ptt *p_ptt, u32 personalities)
1546 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1547 struct public_func shmem_info;
1548 int i, count = 0, num_pfs;
1550 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1552 for (i = 0; i < num_pfs; i++) {
1553 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1554 MCP_PF_ID_BY_REL(p_hwfn, i));
1555 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1558 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
1559 &protocol) != ECORE_SUCCESS)
1562 if ((1 << ((u32)protocol)) & personalities)
1569 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1570 struct ecore_ptt *p_ptt,
1576 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1577 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
1582 if (IS_VF(p_hwfn->p_dev))
1585 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1586 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1587 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1588 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
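/* Worked example (field value assumed): a register field of 3 yields
 * 1 << (3 + 17) = 1 MB of flash, i.e. 8 Mbit.
 */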
1590 *p_flash_size = flash_size;
1592 return ECORE_SUCCESS;
1595 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1596 struct ecore_ptt *p_ptt)
1598 struct ecore_dev *p_dev = p_hwfn->p_dev;
1600 if (p_dev->recov_in_prog) {
1601 DP_NOTICE(p_hwfn, false,
1602 "Avoid triggering a recovery since such a process"
1603 " is already in progress\n");
1607 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
1608 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1610 return ECORE_SUCCESS;
1613 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1614 struct ecore_ptt *p_ptt,
1617 u32 resp = 0, param = 0, rc_param = 0;
1618 enum _ecore_status_t rc;
1620 /* Only the leader can configure MSIX, and we need to take CMT into account */
1622 if (!IS_LEAD_HWFN(p_hwfn))
1623 return ECORE_SUCCESS;
1624 num *= p_hwfn->p_dev->num_hwfns;
1626 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1627 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1628 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1629 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1631 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1634 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1635 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
1639 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1640 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
1647 enum _ecore_status_t
1648 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1649 struct ecore_mcp_drv_version *p_ver)
1651 struct drv_version_stc *p_drv_version;
1652 struct ecore_mcp_mb_params mb_params;
1653 union drv_union_data union_data;
1657 enum _ecore_status_t rc;
1660 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
1661 return ECORE_SUCCESS;
1664 p_drv_version = &union_data.drv_version;
1665 p_drv_version->version = p_ver->version;
1666 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
1667 for (i = 0; i < num_words; i++) {
1668 p_name = &p_ver->name[i * sizeof(u32)];
1669 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
1670 *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
1673 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1674 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
1675 mb_params.p_data_src = &union_data;
1676 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1677 if (rc != ECORE_SUCCESS)
1678 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1683 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
1684 struct ecore_ptt *p_ptt)
1686 enum _ecore_status_t rc;
1687 u32 resp = 0, param = 0;
1689 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
1691 if (rc != ECORE_SUCCESS)
1692 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1697 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
1698 struct ecore_ptt *p_ptt)
1700 u32 value, cpu_mode;
1702 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
1704 value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1705 value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
1706 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
1707 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1709 return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? ECORE_BUSY : ECORE_SUCCESS;
1712 enum _ecore_status_t
1713 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
1714 struct ecore_ptt *p_ptt,
1715 enum ecore_ov_config_method config,
1716 enum ecore_ov_client client)
1718 enum _ecore_status_t rc;
1719 u32 resp = 0, param = 0;
1723 case ECORE_OV_CLIENT_DRV:
1724 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
1726 case ECORE_OV_CLIENT_USER:
1727 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
1730 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config);
1734 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
1735 drv_mb_param, &resp, ¶m);
1736 if (rc != ECORE_SUCCESS)
1737 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1742 enum _ecore_status_t
1743 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
1744 struct ecore_ptt *p_ptt,
1745 enum ecore_ov_driver_state drv_state)
1747 enum _ecore_status_t rc;
1748 u32 resp = 0, param = 0;
1751 switch (drv_state) {
1752 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
1753 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
1755 case ECORE_OV_DRIVER_STATE_DISABLED:
1756 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
1758 case ECORE_OV_DRIVER_STATE_ACTIVE:
1759 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
1762 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
1766 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
1767 drv_state, &resp, ¶m);
1768 if (rc != ECORE_SUCCESS)
1769 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1774 enum _ecore_status_t
1775 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1776 struct ecore_fc_npiv_tbl *p_table)
1781 enum _ecore_status_t
1782 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
1783 struct ecore_ptt *p_ptt, u16 mtu)
1788 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
1789 struct ecore_ptt *p_ptt,
1790 enum ecore_led_mode mode)
1792 u32 resp = 0, param = 0, drv_mb_param;
1793 enum _ecore_status_t rc;
1796 case ECORE_LED_MODE_ON:
1797 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
1799 case ECORE_LED_MODE_OFF:
1800 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
1802 case ECORE_LED_MODE_RESTORE:
1803 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
1806 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
1810 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
1811 drv_mb_param, &resp, ¶m);
1812 if (rc != ECORE_SUCCESS)
1813 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1818 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
1819 struct ecore_ptt *p_ptt,
1822 enum _ecore_status_t rc;
1823 u32 resp = 0, param = 0;
1825 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
1826 mask_parities, &resp, ¶m);
1828 if (rc != ECORE_SUCCESS) {
1830 "MCP response failure for mask parities, aborting\n");
1831 } else if (resp != FW_MSG_CODE_OK) {
1833 "MCP did not ack mask parity request. Old MFW?\n");
1840 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
1843 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1844 u32 bytes_left, offset, bytes_to_copy, buf_size;
1845 struct ecore_mcp_nvm_params params;
1846 struct ecore_ptt *p_ptt;
1847 enum _ecore_status_t rc = ECORE_SUCCESS;
1849 p_ptt = ecore_ptt_acquire(p_hwfn);
1853 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1856 params.type = ECORE_MCP_NVM_RD;
1857 params.nvm_rd.buf_size = &buf_size;
1858 params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
1859 while (bytes_left > 0) {
1860 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
1861 MCP_DRV_NVM_BUF_LEN);
1862 params.nvm_common.offset = (addr + offset) |
1863 (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
1864 params.nvm_rd.buf = (u32 *)(p_buf + offset);
1865 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1866 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
1867 FW_MSG_CODE_NVM_OK)) {
1868 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1872 /* This can be a lengthy process, and the scheduler might not be
1873 * preemptible. Sleep a bit to prevent CPU hogging.
1874 */
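/* The modulo test below fires exactly when the read crosses a 4 KB
 * (0x1000) boundary of bytes_left, so the sleep occurs roughly once
 * per 4 KB transferred.
 */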
1875 if (bytes_left % 0x1000 <
1876 (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
1879 offset += *params.nvm_rd.buf_size;
1880 bytes_left -= *params.nvm_rd.buf_size;
1883 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1884 ecore_ptt_release(p_hwfn, p_ptt);
1889 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
1890 u32 addr, u8 *p_buf, u32 len)
1892 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1893 struct ecore_mcp_nvm_params params;
1894 struct ecore_ptt *p_ptt;
1895 enum _ecore_status_t rc;
1897 p_ptt = ecore_ptt_acquire(p_hwfn);
1901 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1902 params.type = ECORE_MCP_NVM_RD;
1903 params.nvm_rd.buf_size = &len;
1904 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
1905 DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
1906 params.nvm_common.offset = addr;
1907 params.nvm_rd.buf = (u32 *)p_buf;
1908 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1909 if (rc != ECORE_SUCCESS)
1910 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1912 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1913 ecore_ptt_release(p_hwfn, p_ptt);
1918 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
1920 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1921 struct ecore_mcp_nvm_params params;
1922 struct ecore_ptt *p_ptt;
1924 p_ptt = ecore_ptt_acquire(p_hwfn);
1928 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1929 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
1930 ecore_ptt_release(p_hwfn, p_ptt);
1932 return ECORE_SUCCESS;
1935 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
1937 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1938 struct ecore_mcp_nvm_params params;
1939 struct ecore_ptt *p_ptt;
1940 enum _ecore_status_t rc;
1942 p_ptt = ecore_ptt_acquire(p_hwfn);
1945 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1946 params.type = ECORE_MCP_CMD;
1947 params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
1948 params.nvm_common.offset = addr;
1949 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1950 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1951 ecore_ptt_release(p_hwfn, p_ptt);
1956 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
1959 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1960 struct ecore_mcp_nvm_params params;
1961 struct ecore_ptt *p_ptt;
1962 enum _ecore_status_t rc;
1964 p_ptt = ecore_ptt_acquire(p_hwfn);
1967 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1968 params.type = ECORE_MCP_CMD;
1969 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
1970 params.nvm_common.offset = addr;
1971 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1972 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1973 ecore_ptt_release(p_hwfn, p_ptt);
1978 /* rc is initialized to ECORE_INVAL since the while loop below might
1979 * not be entered at all if len is 0
1980 */
1981 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
1982 u32 addr, u8 *p_buf, u32 len)
1984 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1985 enum _ecore_status_t rc = ECORE_INVAL;
1986 struct ecore_mcp_nvm_params params;
1987 struct ecore_ptt *p_ptt;
1988 u32 buf_idx, buf_size;
1990 p_ptt = ecore_ptt_acquire(p_hwfn);
1994 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1995 params.type = ECORE_MCP_NVM_WR;
1996 if (cmd == ECORE_PUT_FILE_DATA)
1997 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
1999 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2001 while (buf_idx < len) {
2002 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2003 MCP_DRV_NVM_BUF_LEN);
2004 params.nvm_common.offset = ((buf_size <<
2005 DRV_MB_PARAM_NVM_LEN_SHIFT)
2007 params.nvm_wr.buf_size = buf_size;
2008 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2009 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
2010 if (rc != ECORE_SUCCESS ||
2011 ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
2012 (params.nvm_common.resp !=
2013 FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
2014 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2016 /* This can be a lengthy process, and the scheduler might not be
2017 * preemptible. Sleep a bit to prevent CPU hogging.
2018 */
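/* As in the read path, the test below detects buf_idx crossing a 4 KB
 * (0x1000) boundary, throttling the loop roughly once per 4 KB written.
 */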
2019 if (buf_idx % 0x1000 >
2020 (buf_idx + buf_size) % 0x1000)
2023 buf_idx += buf_size;
2026 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2027 ecore_ptt_release(p_hwfn, p_ptt);
2032 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2033 u32 addr, u8 *p_buf, u32 len)
2035 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2036 struct ecore_mcp_nvm_params params;
2037 struct ecore_ptt *p_ptt;
2038 enum _ecore_status_t rc;
2040 p_ptt = ecore_ptt_acquire(p_hwfn);
2044 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
2045 params.type = ECORE_MCP_NVM_WR;
2046 params.nvm_wr.buf_size = len;
2047 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
2048 DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
2049 params.nvm_common.offset = addr;
2050 params.nvm_wr.buf = (u32 *)p_buf;
2051 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
2052 if (rc != ECORE_SUCCESS)
2053 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2054 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2055 ecore_ptt_release(p_hwfn, p_ptt);
2060 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2063 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2064 struct ecore_mcp_nvm_params params;
2065 struct ecore_ptt *p_ptt;
2066 enum _ecore_status_t rc;
2068 p_ptt = ecore_ptt_acquire(p_hwfn);
2072 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
2073 params.type = ECORE_MCP_CMD;
2074 params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
2075 params.nvm_common.offset = addr;
2076 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
2077 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2078 ecore_ptt_release(p_hwfn, p_ptt);
enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 bytes_left, bytes_to_copy, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
	addr = offset;
	offset = 0;
	bytes_left = len;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((addr + offset) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	return ECORE_SUCCESS;
}
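
/* Illustrative only: reading the first page (I2C address 0xA0) of a
 * module on port 0; the eeprom buffer is an assumption for the example:
 *
 *	u8 eeprom[256];
 *
 *	rc = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 0, 0xA0, 0,
 *				    sizeof(eeprom), eeprom);
 *
 * ECORE_NODEV means no transceiver is plugged into that port.
 */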
enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 port, u32 addr, u32 offset,
					     u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_idx, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MAX_I2C_TRANSACTION_SIZE);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((offset + buf_idx) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		buf_idx += buf_size;
	}

	return ECORE_SUCCESS;
}
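
/* For reference (a sketch using abbreviated names, not real macros):
 * each iteration re-packs nvm_common.offset so the port and I2C-address
 * bits survive while the byte offset and chunk size are refreshed:
 *
 *	offset = (port << PORT_SHIFT) | (i2c_addr << ADDR_SHIFT) |
 *		 ((off + buf_idx) << OFFSET_SHIFT) | (chunk << SIZE_SHIFT);
 *
 * where *_SHIFT abbreviates the DRV_MB_PARAM_TRANSCEIVER_* shifts above.
 */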
enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
			   drv_mb_param, &rsp, gpio_val);
	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
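
/* Illustrative only; GPIO number 5 is an assumption for the example:
 *
 *	u32 gpio_val;
 *
 *	rc = ecore_mcp_gpio_read(p_hwfn, p_ptt, 5, &gpio_val);
 *	if (rc == ECORE_SUCCESS)
 *		rc = ecore_mcp_gpio_write(p_hwfn, p_ptt, 5, !gpio_val);
 */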
enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u16 gpio, u16 gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, param, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
		       (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_direction,
					 u32 *gpio_ctrl)
{
	u32 drv_mb_param = 0, rsp, val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
			   drv_mb_param, &rsp, &val);
	if (rc != ECORE_SUCCESS)
		return rc;

	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
			  DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
		     DRV_MB_PARAM_GPIO_CTRL_SHIFT;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
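
/* Sketch of querying a pin before driving it (pin number assumed):
 *
 *	u32 dir, ctrl;
 *
 *	rc = ecore_mcp_gpio_info(p_hwfn, p_ptt, 5, &dir, &ctrl);
 *
 * Both outputs are shifted down before being returned, so callers can
 * test them directly without masking again.
 */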
enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
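
/* Both self-tests share the same mailbox protocol: the test index goes
 * into the param word and DRV_MB_PARAM_BIST_RC_PASSED is expected back.
 * A diagnostic path might run them back to back (sketch):
 *
 *	rc = ecore_mcp_bist_register_test(p_hwfn, p_ptt);
 *	if (rc == ECORE_SUCCESS)
 *		rc = ecore_mcp_bist_clock_test(p_hwfn, p_ptt);
 */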
enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, num_images);
	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	struct bist_nvm_image_att *p_image_att, u32 image_index)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
				    DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
	params.nvm_common.offset |= (image_index <<
				     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);

	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
	params.nvm_rd.buf = (u32 *)p_image_att;

	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
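
/* Sketch of how the two BIST NVM helpers compose (loop variables are
 * assumptions for the example):
 *
 *	struct bist_nvm_image_att image_att;
 *	u32 num_images, i;
 *
 *	rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt,
 *						    &num_images);
 *	for (i = 0; rc == ECORE_SUCCESS && i < num_images; i++)
 *		rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
 *							   &image_att, i);
 */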
enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc *p_mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mfw_temp_info = &union_data.temp_info;

	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32,
					      p_mfw_temp_info->num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = p_mfw_temp_info->sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_SHIFT;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_SHIFT;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_SHIFT;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_SHIFT;
	}

	return ECORE_SUCCESS;
}
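
/* Sketch of consuming the decoded readings (logging only; all names
 * besides the API are assumptions):
 *
 *	struct ecore_temperature_info temp_info;
 *	u8 i;
 *
 *	rc = ecore_mcp_get_temperature_info(p_hwfn, p_ptt, &temp_info);
 *	for (i = 0; rc == ECORE_SUCCESS && i < temp_info.num_sensors; i++)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "sensor %u temp %u\n",
 *			   i, temp_info.sensors[i].current_temp);
 */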
enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEM_ZERO(&params, sizeof(params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
	params.nvm_common.offset = 0;
	params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
	params.nvm_rd.buf_size = &buf_size;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
	    FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
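
/* Usage sketch (illustrative): the MBA version words land in
 * p_mba_vers->mba_vers[] and are trusted only when exactly
 * MCP_DRV_NVM_BUF_LEN bytes came back:
 *
 *	struct ecore_mba_vers mba_vers;
 *
 *	rc = ecore_mcp_get_mba_versions(p_hwfn, p_ptt, &mba_vers);
 */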
enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}
#define ECORE_RESC_ALLOC_VERSION_MAJOR 1
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION				\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     struct resource_info *p_resc_info,
					     u32 *p_mcp_resp, u32 *p_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data *p_union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	p_union_data = (union drv_union_data *)p_resc_info;
	mb_params.p_data_src = p_union_data;
	mb_params.p_data_dst = p_union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = mb_params.mcp_resp;
	*p_mcp_param = mb_params.mcp_param;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x,"
		   " offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
		   *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
		   p_resc_info->offset, p_resc_info->vf_size,
		   p_resc_info->vf_offset, p_resc_info->flags);

	return ECORE_SUCCESS;
}
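
/* Usage sketch (illustrative; the resource id value is an assumption):
 *
 *	struct resource_info resc_info;
 *	u32 mcp_resp, mcp_param;
 *
 *	OSAL_MEM_ZERO(&resc_info, sizeof(resc_info));
 *	resc_info.res_id = 1;
 *	rc = ecore_mcp_get_resc_info(p_hwfn, p_ptt, &resc_info,
 *				     &mcp_resp, &mcp_param);
 *
 * The same struct serves as both mailbox source and destination, so the
 * MFW fills size/offset/flags in place.
 */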
enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			     &mcp_resp, &mcp_param);
}
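
/* Note: the FLR request above discards the MFW response. A caller that
 * needs to verify acceptance would issue the command itself (sketch):
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
 *			   &mcp_resp, &mcp_param);
 *	if (rc == ECORE_SUCCESS &&
 *	    (mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
 *		rc = ECORE_UNKNOWN_ERROR;
 */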