2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
24 #define CHIP_MCP_RESP_ITER_US 10
25 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
27 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
28 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
30 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
31 ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
34 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
35 ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
37 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
38 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
39 OFFSETOF(struct public_drv_mb, _field), _val)
41 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
42 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
43 OFFSETOF(struct public_drv_mb, _field))
45 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
46 DRV_ID_PDA_COMP_VER_SHIFT)
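/* PDA_COMP packs FW_MAJOR_VERSION into the low byte and FW_MINOR_VERSION into
 * the next byte, shifted into the PDA compatibility field of the LOAD_REQ
 * mailbox parameter built in ecore_mcp_load_req() below.
 */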
48 #define MCP_BYTES_PER_MBIT_SHIFT 17
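/* 1 Mbit equals 2^17 bytes; ecore_mcp_get_flash_size() adds this shift to the
 * power-of-two Mbit exponent read from MCP_REG_NVM_CFG4 to obtain the flash
 * size in bytes.
 */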
51 static int loaded;
52 static int loaded_port[MAX_NUM_PORTS] = { 0 };
55 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
57 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
62 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
64 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
66 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
68 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
70 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
71 "port_addr = 0x%x, port_id 0x%02x\n",
72 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
75 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
77 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
82 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
86 if (!p_hwfn->mcp_info->public_base)
89 for (i = 0; i < length; i++) {
90 tmp = ecore_rd(p_hwfn, p_ptt,
91 p_hwfn->mcp_info->mfw_mb_addr +
92 (i << 2) + sizeof(u32));
94 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
95 OSAL_BE32_TO_CPU(tmp);
99 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
101 if (p_hwfn->mcp_info) {
102 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
103 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
104 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
106 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
108 return ECORE_SUCCESS;
111 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
112 struct ecore_ptt *p_ptt)
114 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
115 u32 drv_mb_offsize, mfw_mb_offsize;
116 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
119 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
120 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
121 p_info->public_base = 0;
126 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
127 if (!p_info->public_base)
130 p_info->public_base |= GRCBASE_MCP;
132 /* Calculate the driver and MFW mailbox address */
133 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
134 SECTION_OFFSIZE_ADDR(p_info->public_base,
136 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
137 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
138 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
139 " mcp_pf_id = 0x%x\n",
140 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
142 /* Set the MFW MB address */
143 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
144 SECTION_OFFSIZE_ADDR(p_info->public_base,
146 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
147 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
148 p_info->mfw_mb_addr);
150 /* Get the current driver mailbox sequence before sending the first command */
153 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
154 DRV_MSG_SEQ_NUMBER_MASK;
156 /* Get current FW pulse sequence */
157 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
160 p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
161 MISCS_REG_GENERIC_POR_0);
163 return ECORE_SUCCESS;
166 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
167 struct ecore_ptt *p_ptt)
169 struct ecore_mcp_info *p_info;
172 /* Allocate mcp_info structure */
173 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
174 sizeof(*p_hwfn->mcp_info));
175 if (!p_hwfn->mcp_info)
177 p_info = p_hwfn->mcp_info;
179 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
180 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
181 /* Do not free mcp_info here, since public_base indicates that
182 * the MCP is not initialized
184 return ECORE_SUCCESS;
187 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
188 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
190 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
193 /* Initialize the MFW spinlock */
194 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
195 OSAL_SPIN_LOCK_INIT(&p_info->lock);
197 return ECORE_SUCCESS;
200 DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
201 ecore_mcp_free(p_hwfn);
205 /* Locks the MFW mailbox of a PF to ensure a single access.
206 * In most cases the lock is achieved by holding a spinlock, causing other
207 * threads to wait until a previous access is done.
208 * In some cases (currently when [UN]LOAD_REQ commands are sent), single
209 * access is achieved by setting a blocking flag, which causes competing
210 * contexts that try to send a mailbox command to fail.
212 static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
215 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
217 /* The spinlock shouldn't be acquired when the mailbox command is
218 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
219 * pending [UN]LOAD_REQ command of another PF together with a spinlock
220 * (i.e. with interrupts disabled) can lead to a deadlock.
221 * It is assumed that for a single PF, no other mailbox commands can be
222 * sent from another context while sending LOAD_REQ, and that any
223 * parallel commands to UNLOAD_REQ can be cancelled.
225 if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
226 p_hwfn->mcp_info->block_mb_sending = false;
228 if (p_hwfn->mcp_info->block_mb_sending) {
229 DP_NOTICE(p_hwfn, false,
230 "Trying to send a MFW mailbox command [0x%x]"
231 " in parallel to [UN]LOAD_REQ. Aborting.\n",
233 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
237 if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
238 p_hwfn->mcp_info->block_mb_sending = true;
239 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
242 return ECORE_SUCCESS;
245 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
247 if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
248 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
251 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
252 struct ecore_ptt *p_ptt)
254 u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
255 u32 delay = CHIP_MCP_RESP_ITER_US;
256 u32 org_mcp_reset_seq, cnt = 0;
257 enum _ecore_status_t rc = ECORE_SUCCESS;
260 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
261 delay = EMUL_MCP_RESP_ITER_US;
264 /* Ensure that only a single thread is accessing the mailbox at a time */
267 rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
268 if (rc != ECORE_SUCCESS)
271 /* Set drv command along with the updated sequence */
272 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
273 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
276 /* Wait for MFW response */
278 /* Give the FW up to 500 msec (50*1000*10usec) */
279 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
280 MISCS_REG_GENERIC_POR_0)) &&
281 (cnt++ < ECORE_MCP_RESET_RETRIES));
283 if (org_mcp_reset_seq !=
284 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
285 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
286 "MCP was reset after %d usec\n", cnt * delay);
288 DP_ERR(p_hwfn, "Failed to reset MCP\n");
292 ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
297 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
298 struct ecore_ptt *p_ptt,
303 u32 delay = CHIP_MCP_RESP_ITER_US;
304 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
305 u32 seq, cnt = 1, actual_mb_seq;
306 enum _ecore_status_t rc = ECORE_SUCCESS;
309 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
310 delay = EMUL_MCP_RESP_ITER_US;
311 /* There is a built-in delay of 100usec in each MFW response read */
312 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
316 /* Get actual driver mailbox sequence */
317 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
318 DRV_MSG_SEQ_NUMBER_MASK;
320 /* Use MCP history register to check if MCP reset occurred between
323 if (p_hwfn->mcp_info->mcp_hist !=
324 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
325 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
326 ecore_load_mcp_offsets(p_hwfn, p_ptt);
327 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
329 seq = ++p_hwfn->mcp_info->drv_mb_seq;
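/* The sequence number rides in the low bits of the drv_mb_header written
 * below; the wait loop treats the command as completed once the MFW echoes
 * the same sequence back in fw_mb_header.
 */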
332 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
334 /* Set drv command along with the updated sequence */
335 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
338 /* Wait for MFW response */
340 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
342 /* Give the FW up to 5 seconds (500*1000*10usec) */
343 } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
344 (cnt++ < max_retries));
346 /* Is this a reply to our command? */
347 if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
348 *o_mcp_resp &= FW_MSG_CODE_MASK;
349 /* Get the MCP param */
350 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
351 } else {
352 /* FW BUG! */
353 DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
357 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
362 static enum _ecore_status_t
363 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
364 struct ecore_ptt *p_ptt,
365 struct ecore_mcp_mb_params *p_mb_params)
368 enum _ecore_status_t rc;
370 /* MCP not initialized */
371 if (!ecore_mcp_is_init(p_hwfn)) {
372 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
376 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
377 OFFSETOF(struct public_drv_mb, union_data);
379 /* Ensure that only a single thread is accessing the mailbox at a time */
382 rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
383 if (rc != ECORE_SUCCESS)
386 if (p_mb_params->p_data_src != OSAL_NULL)
387 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
388 p_mb_params->p_data_src,
389 sizeof(*p_mb_params->p_data_src));
391 rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
392 p_mb_params->param, &p_mb_params->mcp_resp,
393 &p_mb_params->mcp_param);
395 if (p_mb_params->p_data_dst != OSAL_NULL)
396 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
398 sizeof(*p_mb_params->p_data_dst));
400 ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
405 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
406 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
407 u32 *o_mcp_resp, u32 *o_mcp_param)
409 struct ecore_mcp_mb_params mb_params;
410 enum _ecore_status_t rc;
413 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
414 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
415 loaded--;
416 loaded_port[p_hwfn->port_id]--;
417 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
418 loaded);
419 }
420 return ECORE_SUCCESS;
424 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
426 mb_params.param = param;
427 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
428 if (rc != ECORE_SUCCESS)
431 *o_mcp_resp = mb_params.mcp_resp;
432 *o_mcp_param = mb_params.mcp_param;
434 return ECORE_SUCCESS;
437 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
438 struct ecore_ptt *p_ptt,
443 u32 i_txn_size, u32 *i_buf)
445 struct ecore_mcp_mb_params mb_params;
446 union drv_union_data union_data;
447 enum _ecore_status_t rc;
449 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
451 mb_params.param = param;
452 OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
453 mb_params.p_data_src = &union_data;
454 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
455 if (rc != ECORE_SUCCESS)
458 *o_mcp_resp = mb_params.mcp_resp;
459 *o_mcp_param = mb_params.mcp_param;
461 return ECORE_SUCCESS;
464 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
465 struct ecore_ptt *p_ptt,
470 u32 *o_txn_size, u32 *o_buf)
472 struct ecore_mcp_mb_params mb_params;
473 union drv_union_data union_data;
474 enum _ecore_status_t rc;
476 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
478 mb_params.param = param;
479 mb_params.p_data_dst = &union_data;
480 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
481 if (rc != ECORE_SUCCESS)
484 *o_mcp_resp = mb_params.mcp_resp;
485 *o_mcp_param = mb_params.mcp_param;
487 *o_txn_size = *o_mcp_param;
488 OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
490 return ECORE_SUCCESS;
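/* Emulation only: with no MFW present, the load phase is decided locally by
 * tracking engine/port load counts, so the first PF reports an ENGINE load,
 * the first PF on a port reports a PORT load, and any later PF reports a
 * FUNCTION load.
 */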
494 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
497 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

499 if (!loaded)
500 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
501 else if (!loaded_port[p_hwfn->port_id])
502 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
503 else
504 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
506 /* On CMT, always report an engine load */
507 if (p_hwfn->p_dev->num_hwfns > 1)
508 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
510 *p_load_code = load_phase;
512 loaded_port[p_hwfn->port_id]++;
514 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
515 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
516 *p_load_code, loaded, p_hwfn->port_id,
517 loaded_port[p_hwfn->port_id]);
521 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
522 struct ecore_ptt *p_ptt,
525 struct ecore_dev *p_dev = p_hwfn->p_dev;
526 struct ecore_mcp_mb_params mb_params;
527 union drv_union_data union_data;
528 enum _ecore_status_t rc;
531 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
532 ecore_mcp_mf_workaround(p_hwfn, p_load_code);
533 return ECORE_SUCCESS;
537 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
538 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
539 mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
541 OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
542 mb_params.p_data_src = &union_data;
543 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
545 /* If the MCP fails to respond we must abort */
546 if (rc != ECORE_SUCCESS) {
547 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
551 *p_load_code = mb_params.mcp_resp;
553 /* If MFW refused (e.g. other port is in diagnostic mode) we
554 * must abort. This can happen in the following cases:
555 * - Other port is in diagnostic mode
556 * - Previously loaded function on the engine is not compliant with the
557 *   requester.
558 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
561 if (!(*p_load_code) ||
562 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
563 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
564 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
565 DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
569 return ECORE_SUCCESS;
572 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
573 struct ecore_ptt *p_ptt)
575 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
577 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
578 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
579 ECORE_PATH_ID(p_hwfn));
580 u32 disabled_vfs[VF_MAX_STATIC / 32];
583 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
584 "Reading Disabled VF information from [offset %08x],"
586 mfw_path_offsize, path_addr);
588 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
589 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
591 OFFSETOF(struct public_path,
594 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
595 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
596 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
599 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
600 OSAL_VF_FLR_UPDATE(p_hwfn);
603 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
604 struct ecore_ptt *p_ptt,
607 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
609 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
610 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
612 struct ecore_mcp_mb_params mb_params;
613 union drv_union_data union_data;
614 enum _ecore_status_t rc;
617 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
618 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
619 "Acking VFs [%08x,...,%08x] - %08x\n",
620 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
622 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
623 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
624 OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
625 mb_params.p_data_src = &union_data;
626 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
628 if (rc != ECORE_SUCCESS) {
629 DP_NOTICE(p_hwfn, false,
630 "Failed to pass ACK for VF flr to MFW\n");
631 return ECORE_TIMEOUT;
634 /* TMP - clear the ACK bits; should be done by MFW */
635 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
636 ecore_wr(p_hwfn, p_ptt,
638 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
644 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
645 struct ecore_ptt *p_ptt)
647 u32 transceiver_state;
649 transceiver_state = ecore_rd(p_hwfn, p_ptt,
650 p_hwfn->mcp_info->port_addr +
651 OFFSETOF(struct public_port,
654 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
655 "Received transceiver state update [0x%08x] from mfw"
657 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
658 OFFSETOF(struct public_port,
661 transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
663 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
664 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
665 else
666 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
669 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
670 struct ecore_ptt *p_ptt,
673 struct ecore_mcp_link_state *p_link;
677 p_link = &p_hwfn->mcp_info->link_output;
678 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
680 status = ecore_rd(p_hwfn, p_ptt,
681 p_hwfn->mcp_info->port_addr +
682 OFFSETOF(struct public_port, link_status));
683 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
684 "Received link update [0x%08x] from mfw"
686 status, (u32)(p_hwfn->mcp_info->port_addr +
687 OFFSETOF(struct public_port,
690 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
691 "Resetting link indications\n");
695 if (p_hwfn->b_drv_link_init)
696 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
697 else
698 p_link->link_up = false;
700 p_link->full_duplex = true;
701 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
702 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
703 p_link->speed = 100000;
705 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
706 p_link->speed = 50000;
708 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
709 p_link->speed = 40000;
711 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
712 p_link->speed = 25000;
714 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
715 p_link->speed = 20000;
717 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
718 p_link->speed = 10000;
720 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
721 p_link->full_duplex = false;
723 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
724 p_link->speed = 1000;
730 /* Record the full line speed before bandwidth allocation, since
731 * p_link->speed is changed later according to the bandwidth allocation.
733 if (p_link->link_up && p_link->speed)
734 p_link->line_speed = p_link->speed;
735 else
736 p_link->line_speed = 0;
738 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
739 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
741 /* Max bandwidth configuration */
742 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
745 /* Min bandwidth configuration */
746 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
748 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
749 p_link->min_pf_rate);
751 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
752 p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
753 p_link->parallel_detection = !!(status &
754 LINK_STATUS_PARALLEL_DETECTION_USED);
755 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
757 p_link->partner_adv_speed |=
758 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
759 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
760 p_link->partner_adv_speed |=
761 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
762 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
763 p_link->partner_adv_speed |=
764 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
765 ECORE_LINK_PARTNER_SPEED_10G : 0;
766 p_link->partner_adv_speed |=
767 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
768 ECORE_LINK_PARTNER_SPEED_20G : 0;
769 p_link->partner_adv_speed |=
770 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
771 ECORE_LINK_PARTNER_SPEED_25G : 0;
772 p_link->partner_adv_speed |=
773 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
774 ECORE_LINK_PARTNER_SPEED_40G : 0;
775 p_link->partner_adv_speed |=
776 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
777 ECORE_LINK_PARTNER_SPEED_50G : 0;
778 p_link->partner_adv_speed |=
779 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
780 ECORE_LINK_PARTNER_SPEED_100G : 0;
782 p_link->partner_tx_flow_ctrl_en =
783 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
784 p_link->partner_rx_flow_ctrl_en =
785 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
787 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
788 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
789 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
791 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
792 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
794 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
795 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
798 p_link->partner_adv_pause = 0;
801 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
803 OSAL_LINK_UPDATE(p_hwfn);
806 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
807 struct ecore_ptt *p_ptt, bool b_up)
809 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
810 struct ecore_mcp_mb_params mb_params;
811 union drv_union_data union_data;
812 struct eth_phy_cfg *p_phy_cfg;
813 enum _ecore_status_t rc = ECORE_SUCCESS;
817 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
818 return ECORE_SUCCESS;
821 /* Set the shmem configuration according to params */
822 p_phy_cfg = &union_data.drv_phy_cfg;
823 OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
824 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
825 if (!params->speed.autoneg)
826 p_phy_cfg->speed = params->speed.forced_speed;
827 p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
828 p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
829 p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
830 p_phy_cfg->adv_speed = params->speed.advertised_speeds;
831 p_phy_cfg->loopback_mode = params->loopback_mode;
832 p_hwfn->b_drv_link_init = b_up;
835 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
836 "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
837 " adv_speed 0x%08x, loopback 0x%08x\n",
838 p_phy_cfg->speed, p_phy_cfg->pause,
839 p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode);
841 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
843 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
845 mb_params.p_data_src = &union_data;
846 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
848 /* If the MCP fails to respond we must abort */
849 if (rc != ECORE_SUCCESS) {
850 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
854 /* Reset the link status if needed */
856 ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
861 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
862 struct ecore_ptt *p_ptt)
864 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
866 /* TODO - Add support for VFs */
867 if (IS_VF(p_hwfn->p_dev))
870 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
872 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
873 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
875 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
877 OFFSETOF(struct public_path, process_kill)) &
878 PROCESS_KILL_COUNTER_MASK;
880 return proc_kill_cnt;
883 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
884 struct ecore_ptt *p_ptt)
886 struct ecore_dev *p_dev = p_hwfn->p_dev;
889 /* Prevent possible attentions/interrupts during the recovery handling
890 * and until its load phase, during which they will be re-enabled.
892 ecore_int_igu_disable_int(p_hwfn, p_ptt);
894 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
896 /* The following operations should be done once, and thus in CMT mode
897 * are carried out by only the first HW function.
899 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
902 if (p_dev->recov_in_prog) {
903 DP_NOTICE(p_hwfn, false,
904 "Ignoring the indication since a recovery"
905 " process is already in progress\n");
909 p_dev->recov_in_prog = true;
911 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
912 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
914 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
917 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
918 struct ecore_ptt *p_ptt,
919 enum MFW_DRV_MSG_TYPE type)
921 enum ecore_mcp_protocol_type stats_type;
922 union ecore_mcp_protocol_stats stats;
923 struct ecore_mcp_mb_params mb_params;
924 union drv_union_data union_data;
928 case MFW_DRV_MSG_GET_LAN_STATS:
929 stats_type = ECORE_MCP_LAN_STATS;
930 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
933 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
937 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
939 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
940 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
941 mb_params.param = hsi_param;
942 OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
943 mb_params.p_data_src = &union_data;
944 ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
947 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
948 struct public_func *p_shmem_info)
950 struct ecore_mcp_function_info *p_info;
952 p_info = &p_hwfn->mcp_info->func_info;
954 /* TODO - bandwidth min/max should have valid values of 1-100,
955 * as well as some indication that the feature is disabled.
956 * Until MFW/qlediag enforce those limitations, assume there is always a
957 * limit, and correct the value to min 1 and max 100 if it isn't in range.
960 p_info->bandwidth_min = (p_shmem_info->config &
961 FUNC_MF_CFG_MIN_BW_MASK) >>
962 FUNC_MF_CFG_MIN_BW_SHIFT;
963 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
965 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
966 p_info->bandwidth_min);
967 p_info->bandwidth_min = 1;
970 p_info->bandwidth_max = (p_shmem_info->config &
971 FUNC_MF_CFG_MAX_BW_MASK) >>
972 FUNC_MF_CFG_MAX_BW_SHIFT;
973 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
975 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
976 p_info->bandwidth_max);
977 p_info->bandwidth_max = 100;
981 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
982 struct ecore_ptt *p_ptt,
983 struct public_func *p_data,
986 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
988 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
989 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
992 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
994 size = OSAL_MIN_T(u32, sizeof(*p_data),
995 SECTION_SIZE(mfw_path_offsize));
996 for (i = 0; i < size / sizeof(u32); i++)
997 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
998 func_addr + (i << 2));
1004 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1006 struct ecore_mcp_function_info *p_info;
1007 struct public_func shmem_info;
1008 u32 resp = 0, param = 0;
1010 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1012 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1014 p_info = &p_hwfn->mcp_info->func_info;
1016 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1018 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1020 /* Acknowledge the MFW */
1021 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1025 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1026 struct ecore_ptt *p_ptt)
1028 /* A single notification should be sent to the upper driver in CMT mode */
1029 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1032 DP_NOTICE(p_hwfn, false,
1033 "Fan failure was detected on the network interface card"
1034 " and it's going to be shut down.\n");
1036 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1039 static enum _ecore_status_t
1040 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1041 u32 mdump_cmd, union drv_union_data *p_data_src,
1042 union drv_union_data *p_data_dst, u32 *p_mcp_resp)
1044 struct ecore_mcp_mb_params mb_params;
1045 enum _ecore_status_t rc;
1047 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1048 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1049 mb_params.param = mdump_cmd;
1050 mb_params.p_data_src = p_data_src;
1051 mb_params.p_data_dst = p_data_dst;
1052 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1053 if (rc != ECORE_SUCCESS)
1056 *p_mcp_resp = mb_params.mcp_resp;
1057 if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1058 DP_NOTICE(p_hwfn, false,
1059 "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
1067 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1068 struct ecore_ptt *p_ptt)
1072 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
1073 OSAL_NULL, OSAL_NULL, &mcp_resp);
1076 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1077 struct ecore_ptt *p_ptt,
1080 union drv_union_data union_data;
1083 OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
1085 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
1086 &union_data, OSAL_NULL, &mcp_resp);
1089 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1090 struct ecore_ptt *p_ptt)
1094 p_hwfn->p_dev->mdump_en = true;
1096 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
1097 OSAL_NULL, OSAL_NULL, &mcp_resp);
1100 static enum _ecore_status_t
1101 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1102 struct mdump_config_stc *p_mdump_config)
1104 union drv_union_data union_data;
1106 enum _ecore_status_t rc;
1108 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
1109 OSAL_NULL, &union_data, &mcp_resp);
1110 if (rc != ECORE_SUCCESS)
1113 /* A zero response implies that the mdump command is not supported */
1115 return ECORE_NOTIMPL;
1117 if (mcp_resp != FW_MSG_CODE_OK) {
1118 DP_NOTICE(p_hwfn, false,
1119 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1121 rc = ECORE_UNKNOWN_ERROR;
1124 OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
1125 sizeof(*p_mdump_config));
1130 enum _ecore_status_t
1131 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1132 struct ecore_mdump_info *p_mdump_info)
1134 u32 addr, global_offsize, global_addr;
1135 struct mdump_config_stc mdump_config;
1136 enum _ecore_status_t rc;
1138 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1140 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1142 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1143 global_addr = SECTION_ADDR(global_offsize, 0);
1144 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1146 OFFSETOF(struct public_global,
1149 if (p_mdump_info->reason) {
1150 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1151 if (rc != ECORE_SUCCESS)
1154 p_mdump_info->version = mdump_config.version;
1155 p_mdump_info->config = mdump_config.config;
1156 p_mdump_info->epoch = mdump_config.epoc;
1157 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1158 p_mdump_info->valid_logs = mdump_config.valid_logs;
1160 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1161 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1162 p_mdump_info->reason, p_mdump_info->version,
1163 p_mdump_info->config, p_mdump_info->epoch,
1164 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1166 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1167 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1170 return ECORE_SUCCESS;
1173 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1174 struct ecore_ptt *p_ptt)
1178 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
1179 OSAL_NULL, OSAL_NULL, &mcp_resp);
1182 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1183 struct ecore_ptt *p_ptt)
1185 /* In CMT mode - no need for more than a single acknowledgment to the
1186 * MFW, and no more than a single notification to the upper driver.
1188 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1191 DP_NOTICE(p_hwfn, false,
1192 "Received a critical error notification from the MFW!\n");
1194 if (p_hwfn->p_dev->mdump_en) {
1195 DP_NOTICE(p_hwfn, false,
1196 "Not acknowledging the notification to allow the MFW crash dump\n");
1197 p_hwfn->p_dev->mdump_en = false;
1201 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1202 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1205 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1206 struct ecore_ptt *p_ptt)
1208 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1209 enum _ecore_status_t rc = ECORE_SUCCESS;
1213 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1215 /* Read Messages from MFW */
1216 ecore_mcp_read_mb(p_hwfn, p_ptt);
1218 /* Compare current messages to old ones */
1219 for (i = 0; i < info->mfw_mb_length; i++) {
1220 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1225 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1226 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1227 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1230 case MFW_DRV_MSG_LINK_CHANGE:
1231 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1233 case MFW_DRV_MSG_VF_DISABLED:
1234 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1236 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1237 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1238 ECORE_DCBX_REMOTE_LLDP_MIB);
1240 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1241 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1242 ECORE_DCBX_REMOTE_MIB);
1244 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1245 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1246 ECORE_DCBX_OPERATIONAL_MIB);
1248 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1249 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1251 case MFW_DRV_MSG_ERROR_RECOVERY:
1252 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1254 case MFW_DRV_MSG_GET_LAN_STATS:
1255 case MFW_DRV_MSG_GET_FCOE_STATS:
1256 case MFW_DRV_MSG_GET_ISCSI_STATS:
1257 case MFW_DRV_MSG_GET_RDMA_STATS:
1258 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1260 case MFW_DRV_MSG_BW_UPDATE:
1261 ecore_mcp_update_bw(p_hwfn, p_ptt);
1263 case MFW_DRV_MSG_FAILURE_DETECTED:
1264 ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1266 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1267 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1270 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1275 /* ACK everything */
1276 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1277 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
1279 /* The MFW expects the answer in BE, so we force the write in that format */
1280 ecore_wr(p_hwfn, p_ptt,
1281 info->mfw_mb_addr + sizeof(u32) +
1282 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1283 sizeof(u32) + i * sizeof(u32), val);
1287 DP_NOTICE(p_hwfn, false,
1288 "Received an MFW message indication but no"
1293 /* Copy the new mfw messages into the shadow */
1294 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1299 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1300 struct ecore_ptt *p_ptt,
1302 u32 *p_running_bundle_id)
1307 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1308 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1309 return ECORE_SUCCESS;
1313 if (IS_VF(p_hwfn->p_dev)) {
1314 if (p_hwfn->vf_iov_info) {
1315 struct pfvf_acquire_resp_tlv *p_resp;
1317 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1318 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1319 return ECORE_SUCCESS;
1321 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1322 "VF requested MFW version prior to ACQUIRE\n");
1327 global_offsize = ecore_rd(p_hwfn, p_ptt,
1328 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1332 ecore_rd(p_hwfn, p_ptt,
1333 SECTION_ADDR(global_offsize,
1334 0) + OFFSETOF(struct public_global, mfw_ver));
1336 if (p_running_bundle_id != OSAL_NULL) {
1337 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1338 SECTION_ADDR(global_offsize,
1340 OFFSETOF(struct public_global,
1341 running_bundle_id));
1344 return ECORE_SUCCESS;
1347 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1350 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1351 struct ecore_ptt *p_ptt;
1353 /* TODO - Add support for VFs */
1357 if (!ecore_mcp_is_init(p_hwfn)) {
1358 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
1362 *p_media_type = MEDIA_UNSPECIFIED;
1364 p_ptt = ecore_ptt_acquire(p_hwfn);
1368 *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1369 OFFSETOF(struct public_port, media_type));
1371 ecore_ptt_release(p_hwfn, p_ptt);
1373 return ECORE_SUCCESS;
1376 static enum _ecore_status_t
1377 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1378 struct public_func *p_info,
1379 enum ecore_pci_personality *p_proto)
1381 enum _ecore_status_t rc = ECORE_SUCCESS;
1383 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1384 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1385 *p_proto = ECORE_PCI_ETH;
1394 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1395 struct ecore_ptt *p_ptt)
1397 struct ecore_mcp_function_info *info;
1398 struct public_func shmem_info;
1400 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1401 info = &p_hwfn->mcp_info->func_info;
1403 info->pause_on_host = (shmem_info.config &
1404 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1406 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
1407 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1408 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1412 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1414 if (shmem_info.mac_upper || shmem_info.mac_lower) {
1415 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1416 info->mac[1] = (u8)(shmem_info.mac_upper);
1417 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1418 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1419 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1420 info->mac[5] = (u8)(shmem_info.mac_lower);
1422 /* TODO - are there protocols for which there's no MAC? */
1423 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1426 /* TODO - are these calculations true for BE machine? */
1427 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
1428 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
1429 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
1430 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
1432 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1434 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1435 "Read configuration from shmem: pause_on_host %02x"
1436 " protocol %02x BW [%02x - %02x]"
1437 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
1438 " node %lx ovlan %04x\n",
1439 info->pause_on_host, info->protocol,
1440 info->bandwidth_min, info->bandwidth_max,
1441 info->mac[0], info->mac[1], info->mac[2],
1442 info->mac[3], info->mac[4], info->mac[5],
1443 (unsigned long)info->wwn_port,
1444 (unsigned long)info->wwn_node, info->ovlan);
1446 return ECORE_SUCCESS;
1449 struct ecore_mcp_link_params
1450 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1452 if (!p_hwfn || !p_hwfn->mcp_info)
1454 return &p_hwfn->mcp_info->link_input;
1457 struct ecore_mcp_link_state
1458 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1460 if (!p_hwfn || !p_hwfn->mcp_info)
1464 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1465 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1466 p_hwfn->mcp_info->link_output.link_up = true;
1470 return &p_hwfn->mcp_info->link_output;
1473 struct ecore_mcp_link_capabilities
1474 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1476 if (!p_hwfn || !p_hwfn->mcp_info)
1478 return &p_hwfn->mcp_info->link_capabilities;
1481 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1482 struct ecore_ptt *p_ptt)
1484 u32 resp = 0, param = 0;
1485 enum _ecore_status_t rc;
1487 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
1488 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, ¶m);
1490 /* Wait for the drain to complete before returning */
1496 const struct ecore_mcp_function_info
1497 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1499 if (!p_hwfn || !p_hwfn->mcp_info)
1501 return &p_hwfn->mcp_info->func_info;
1504 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1505 struct ecore_ptt *p_ptt,
1506 struct ecore_mcp_nvm_params *params)
1508 enum _ecore_status_t rc;
1510 switch (params->type) {
1511 case ECORE_MCP_NVM_RD:
1512 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1513 params->nvm_common.offset,
1514 ¶ms->nvm_common.resp,
1515 ¶ms->nvm_common.param,
1516 params->nvm_rd.buf_size,
1517 params->nvm_rd.buf);
1520 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1521 params->nvm_common.offset,
1522 ¶ms->nvm_common.resp,
1523 ¶ms->nvm_common.param);
1525 case ECORE_MCP_NVM_WR:
1526 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1527 params->nvm_common.offset,
1528 ¶ms->nvm_common.resp,
1529 ¶ms->nvm_common.param,
1530 params->nvm_wr.buf_size,
1531 params->nvm_wr.buf);
1540 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1541 struct ecore_ptt *p_ptt, u32 personalities)
1543 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1544 struct public_func shmem_info;
1545 int i, count = 0, num_pfs;
1547 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1549 for (i = 0; i < num_pfs; i++) {
1550 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1551 MCP_PF_ID_BY_REL(p_hwfn, i));
1552 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1555 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
1556 &protocol) != ECORE_SUCCESS)
1559 if ((1 << ((u32)protocol)) & personalities)
1566 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1567 struct ecore_ptt *p_ptt,
1573 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1574 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
1579 if (IS_VF(p_hwfn->p_dev))
1582 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1583 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1584 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1585 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1587 *p_flash_size = flash_size;
1589 return ECORE_SUCCESS;
1592 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1593 struct ecore_ptt *p_ptt)
1595 struct ecore_dev *p_dev = p_hwfn->p_dev;
1597 if (p_dev->recov_in_prog) {
1598 DP_NOTICE(p_hwfn, false,
1599 "Avoid triggering a recovery since such a process"
1600 " is already in progress\n");
1604 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
1605 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1607 return ECORE_SUCCESS;
1610 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1611 struct ecore_ptt *p_ptt,
1614 u32 resp = 0, param = 0, rc_param = 0;
1615 enum _ecore_status_t rc;
1617 /* Only the leader can configure MSI-X, and CMT needs to be taken into account */
1619 if (!IS_LEAD_HWFN(p_hwfn))
1620 return ECORE_SUCCESS;
1621 num *= p_hwfn->p_dev->num_hwfns;
1623 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1624 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1625 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1626 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1628 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1631 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1632 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
1636 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1637 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
1644 enum _ecore_status_t
1645 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1646 struct ecore_mcp_drv_version *p_ver)
1648 struct drv_version_stc *p_drv_version;
1649 struct ecore_mcp_mb_params mb_params;
1650 union drv_union_data union_data;
1654 enum _ecore_status_t rc;
1657 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
1658 return ECORE_SUCCESS;
1661 p_drv_version = &union_data.drv_version;
1662 p_drv_version->version = p_ver->version;
1663 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
1664 for (i = 0; i < num_words; i++) {
1665 p_name = &p_ver->name[i * sizeof(u32)];
1666 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
1667 *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
1670 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1671 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
1672 mb_params.p_data_src = &union_data;
1673 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1674 if (rc != ECORE_SUCCESS)
1675 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1680 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
1681 struct ecore_ptt *p_ptt)
1683 enum _ecore_status_t rc;
1684 u32 resp = 0, param = 0;
1686 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
1688 if (rc != ECORE_SUCCESS)
1689 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1694 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
1695 struct ecore_ptt *p_ptt)
1697 u32 value, cpu_mode;
1699 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
1701 value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1702 value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
1703 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
1704 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1706 return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
1709 enum _ecore_status_t
1710 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
1711 struct ecore_ptt *p_ptt,
1712 enum ecore_ov_client client)
1714 enum _ecore_status_t rc;
1715 u32 resp = 0, param = 0;
1719 case ECORE_OV_CLIENT_DRV:
1720 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
1722 case ECORE_OV_CLIENT_USER:
1723 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
1725 case ECORE_OV_CLIENT_VENDOR_SPEC:
1726 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
1729 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
1733 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
1734 drv_mb_param, &resp, ¶m);
1735 if (rc != ECORE_SUCCESS)
1736 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1741 enum _ecore_status_t
1742 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
1743 struct ecore_ptt *p_ptt,
1744 enum ecore_ov_driver_state drv_state)
1746 enum _ecore_status_t rc;
1747 u32 resp = 0, param = 0;
1750 switch (drv_state) {
1751 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
1752 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
1754 case ECORE_OV_DRIVER_STATE_DISABLED:
1755 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
1757 case ECORE_OV_DRIVER_STATE_ACTIVE:
1758 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
1761 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
1765 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
1766 drv_mb_param, &resp, ¶m);
1767 if (rc != ECORE_SUCCESS)
1768 DP_ERR(p_hwfn, "Failed to send driver state\n");
1773 enum _ecore_status_t
1774 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1775 struct ecore_fc_npiv_tbl *p_table)
1780 enum _ecore_status_t
1781 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
1782 struct ecore_ptt *p_ptt, u16 mtu)
1787 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
1788 struct ecore_ptt *p_ptt,
1789 enum ecore_led_mode mode)
1791 u32 resp = 0, param = 0, drv_mb_param;
1792 enum _ecore_status_t rc;
1795 case ECORE_LED_MODE_ON:
1796 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
1798 case ECORE_LED_MODE_OFF:
1799 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
1801 case ECORE_LED_MODE_RESTORE:
1802 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
1805 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
1809 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
1810 drv_mb_param, &resp, ¶m);
1811 if (rc != ECORE_SUCCESS)
1812 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1817 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
1818 struct ecore_ptt *p_ptt,
1821 enum _ecore_status_t rc;
1822 u32 resp = 0, param = 0;
1824 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
1825 mask_parities, &resp, ¶m);
1827 if (rc != ECORE_SUCCESS) {
1829 "MCP response failure for mask parities, aborting\n");
1830 } else if (resp != FW_MSG_CODE_OK) {
1832 "MCP did not ack mask parity request. Old MFW?\n");
1839 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
1842 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1843 u32 bytes_left, offset, bytes_to_copy, buf_size;
1844 struct ecore_mcp_nvm_params params;
1845 struct ecore_ptt *p_ptt;
1846 enum _ecore_status_t rc = ECORE_SUCCESS;
1848 p_ptt = ecore_ptt_acquire(p_hwfn);
1852 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1855 params.type = ECORE_MCP_NVM_RD;
1856 params.nvm_rd.buf_size = &buf_size;
1857 params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
1858 while (bytes_left > 0) {
1859 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
1860 MCP_DRV_NVM_BUF_LEN);
1861 params.nvm_common.offset = (addr + offset) |
1862 (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
1863 params.nvm_rd.buf = (u32 *)(p_buf + offset);
1864 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1865 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
1866 FW_MSG_CODE_NVM_OK)) {
1867 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1871 /* This can be a lengthy process, and the scheduler might not be
1872 * preemptible. Sleep a bit to prevent CPU hogging.
1874 if (bytes_left % 0x1000 <
1875 (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
1876 OSAL_MSLEEP(1);
1878 offset += *params.nvm_rd.buf_size;
1879 bytes_left -= *params.nvm_rd.buf_size;
1882 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1883 ecore_ptt_release(p_hwfn, p_ptt);
1888 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
1889 u32 addr, u8 *p_buf, u32 len)
1891 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1892 struct ecore_mcp_nvm_params params;
1893 struct ecore_ptt *p_ptt;
1894 enum _ecore_status_t rc;
1896 p_ptt = ecore_ptt_acquire(p_hwfn);
1900 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1901 params.type = ECORE_MCP_NVM_RD;
1902 params.nvm_rd.buf_size = &len;
1903 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
1904 DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
1905 params.nvm_common.offset = addr;
1906 params.nvm_rd.buf = (u32 *)p_buf;
1907 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1908 if (rc != ECORE_SUCCESS)
1909 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1911 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1912 ecore_ptt_release(p_hwfn, p_ptt);
1917 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
1919 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1920 struct ecore_mcp_nvm_params params;
1921 struct ecore_ptt *p_ptt;
1923 p_ptt = ecore_ptt_acquire(p_hwfn);
1927 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1928 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
1929 ecore_ptt_release(p_hwfn, p_ptt);
1931 return ECORE_SUCCESS;
1934 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
1936 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1937 struct ecore_mcp_nvm_params params;
1938 struct ecore_ptt *p_ptt;
1939 enum _ecore_status_t rc;
1941 p_ptt = ecore_ptt_acquire(p_hwfn);
1944 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1945 params.type = ECORE_MCP_CMD;
1946 params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
1947 params.nvm_common.offset = addr;
1948 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1949 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1950 ecore_ptt_release(p_hwfn, p_ptt);
1955 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
1958 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1959 struct ecore_mcp_nvm_params params;
1960 struct ecore_ptt *p_ptt;
1961 enum _ecore_status_t rc;
1963 p_ptt = ecore_ptt_acquire(p_hwfn);
1966 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1967 params.type = ECORE_MCP_CMD;
1968 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
1969 params.nvm_common.offset = addr;
1970 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1971 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1972 ecore_ptt_release(p_hwfn, p_ptt);
1977 /* rc defaults to ECORE_INVAL because the while loop below might not be
1978 * entered if len is 0
1980 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
1981 u32 addr, u8 *p_buf, u32 len)
1983 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1984 enum _ecore_status_t rc = ECORE_INVAL;
1985 struct ecore_mcp_nvm_params params;
1986 struct ecore_ptt *p_ptt;
1987 u32 buf_idx, buf_size;
1989 p_ptt = ecore_ptt_acquire(p_hwfn);
1993 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1994 params.type = ECORE_MCP_NVM_WR;
1995 if (cmd == ECORE_PUT_FILE_DATA)
1996 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
1997 else
1998 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2000 while (buf_idx < len) {
2001 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2002 MCP_DRV_NVM_BUF_LEN);
2003 params.nvm_common.offset = ((buf_size <<
2004 DRV_MB_PARAM_NVM_LEN_SHIFT)
2006 params.nvm_wr.buf_size = buf_size;
2007 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2008 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
2009 if (rc != ECORE_SUCCESS ||
2010 ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
2011 (params.nvm_common.resp !=
2012 FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
2013 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2015 /* This can be a lengthy process, and the scheduler might not be
2016 * preemptible. Sleep a bit to prevent CPU hogging.
2018 if (buf_idx % 0x1000 >
2019 (buf_idx + buf_size) % 0x1000)
2020 OSAL_MSLEEP(1);
2022 buf_idx += buf_size;
2025 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2026 ecore_ptt_release(p_hwfn, p_ptt);
2031 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2032 u32 addr, u8 *p_buf, u32 len)
2034 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2035 struct ecore_mcp_nvm_params params;
2036 struct ecore_ptt *p_ptt;
2037 enum _ecore_status_t rc;
2039 p_ptt = ecore_ptt_acquire(p_hwfn);
2043 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
2044 params.type = ECORE_MCP_NVM_WR;
2045 params.nvm_wr.buf_size = len;
2046 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
2047 DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
2048 params.nvm_common.offset = addr;
2049 params.nvm_wr.buf = (u32 *)p_buf;
2050 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
2051 if (rc != ECORE_SUCCESS)
2052 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2053 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2054 ecore_ptt_release(p_hwfn, p_ptt);
2059 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2062 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2063 struct ecore_mcp_nvm_params params;
2064 struct ecore_ptt *p_ptt;
2065 enum _ecore_status_t rc;
2067 p_ptt = ecore_ptt_acquire(p_hwfn);
2071 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
2072 params.type = ECORE_MCP_CMD;
2073 params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
2074 params.nvm_common.offset = addr;
2075 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
2076 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2077 ecore_ptt_release(p_hwfn, p_ptt);
enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 bytes_left, bytes_to_copy, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
	addr = offset;
	offset = 0;
	bytes_left = len;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((addr + offset) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	return ECORE_SUCCESS;
}
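
/* Write 'len' bytes from p_buf to the SFP transceiver on 'port' (I2C address
 * 'addr', starting at 'offset'), in chunks of MAX_I2C_TRANSACTION_SIZE.
 */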
enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 port, u32 addr, u32 offset,
					     u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_idx, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MAX_I2C_TRANSACTION_SIZE);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((offset + buf_idx) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		buf_idx += buf_size;
	}

	return ECORE_SUCCESS;
}
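
/* Read the current value of an MFW-controlled GPIO into *gpio_val. */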
enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
			   drv_mb_param, &rsp, gpio_val);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
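
/* Drive an MFW-controlled GPIO to 'gpio_val'. */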
enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u16 gpio, u16 gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, param, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
		       (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
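
/* Query the direction and control attributes of an MFW-controlled GPIO. */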
enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_direction,
					 u32 *gpio_ctrl)
{
	u32 drv_mb_param = 0, rsp, val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
			   drv_mb_param, &rsp, &val);
	if (rc != ECORE_SUCCESS)
		return rc;

	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
			  DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
		     DRV_MB_PARAM_GPIO_CTRL_SHIFT;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
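
/* Ask the MFW to run the register built-in self test. */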
enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
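
/* Ask the MFW to run the clock built-in self test. */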
enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
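
/* Query how many NVM images the BIST NVM test can report on. */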
enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, num_images);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
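
/* Retrieve the attributes of the NVM image at 'image_index' for the BIST
 * NVM test.
 */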
enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	struct bist_nvm_image_att *p_image_att, u32 image_index)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
				    DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
	params.nvm_common.offset |= (image_index <<
				     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);

	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
	params.nvm_rd.buf = (u32 *)p_image_att;

	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
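
/* Read the per-sensor temperature report from the MFW and translate it into
 * the ecore_temperature_info representation.
 */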
enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc *p_mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mfw_temp_info = &union_data.temp_info;

	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32,
					      p_mfw_temp_info->num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = p_mfw_temp_info->sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_SHIFT;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_SHIFT;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_SHIFT;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_SHIFT;
	}

	return ECORE_SUCCESS;
}
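
/* Retrieve the MBA version words from the MFW into p_mba_vers. */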
enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEM_ZERO(&params, sizeof(params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
	params.nvm_common.offset = 0;
	params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
	params.nvm_rd.buf_size = &buf_size;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
	    FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
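
/* Read the number of memory ECC events recorded by the MFW. */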
enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}

#define ECORE_RESC_ALLOC_VERSION_MAJOR 1
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION					\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<				\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |		\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<				\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
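
/* Exchange resource allocation information for a single resource with the
 * MFW, advertising ECORE_RESC_ALLOC_VERSION as the protocol version.
 */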
enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     struct resource_info *p_resc_info,
					     u32 *p_mcp_resp, u32 *p_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	OSAL_MEMCPY(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
	mb_params.p_data_src = &union_data;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = mb_params.mcp_resp;
	*p_mcp_param = mb_params.mcp_param;

	OSAL_MEMCPY(p_resc_info, &union_data.resource, sizeof(*p_resc_info));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x,"
		   " offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
		   *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
		   p_resc_info->offset, p_resc_info->vf_size,
		   p_resc_info->vf_offset, p_resc_info->flags);

	return ECORE_SUCCESS;
}
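
/* Request that the MFW initiate an FLR (function level reset) for this PF. */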
enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			     &mcp_resp, &mcp_param);
}