/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */
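/* Illustrative arithmetic (added note, not from the original source): each
 * retry iteration waits CHIP_MCP_RESP_ITER_US = 10 usec, so 500 * 1000
 * retries bound a mailbox command at ~5 sec and 50 * 1000 retries bound an
 * MCP reset at ~500 msec. On emulation the per-iteration delay grows to
 * 1 sec, and the same loop bounds stretch accordingly.
 */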
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)
#define MCP_BYTES_PER_MBIT_SHIFT 17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
	}
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
	    DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					 MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates
		 * that the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
		goto err;

	/* Initialize the MFW spinlock */
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
	OSAL_SPIN_LOCK_INIT(&p_info->lock);

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}
/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when a [UN]LOAD_REQ command is sent), the single
 * access is achieved by setting a blocking flag, which causes the mailbox
 * sends of other competing contexts to fail.
 */
static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
					      u32 cmd)
{
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn, false,
			  "Trying to send a MFW mailbox command [0x%x]"
			  " in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
		return ECORE_BUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
	}

	return ECORE_SUCCESS;
}
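/* Illustrative usage sketch (added, not part of the original source): every
 * mailbox send is bracketed by the lock/unlock pair, e.g.
 *
 *	rc = ecore_mcp_mb_lock(p_hwfn, cmd);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, &resp, &param_out);
 *	ecore_mcp_mb_unlock(p_hwfn, cmd);
 *
 * For [UN]LOAD_REQ the spinlock is released early with block_mb_sending set,
 * and the matching [UN]LOAD_DONE clears the flag again.
 */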
static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the FW up to 500 msec (50*1000 iterations of 10 usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 cmd, u32 param,
					     u32 *o_mcp_resp,
					     u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 seq, cnt = 1, actual_mb_seq;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 sec (500*1000 iterations of 10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < max_retries));

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* The MFW failed to respond within the timeout */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}

	return rc;
}
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;
	enum _ecore_status_t rc;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (p_mb_params->p_data_src != OSAL_NULL)
		ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
				p_mb_params->p_data_src,
				sizeof(*p_mb_params->p_data_src));

	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			      p_mb_params->param, &p_mb_params->mcp_resp,
			      &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != OSAL_NULL)
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr,
				  sizeof(*p_mb_params->p_data_dst));

	ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
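/* Illustrative caller sketch (added, not part of the original source): a
 * parameter-only command checks both the transport status and the FW
 * response code, e.g.
 *
 *	u32 resp = 0, param = 0;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 1000,
 *			   &resp, &param);
 *	if (rc != ECORE_SUCCESS || resp != FW_MSG_CODE_OK)
 *		// handle failure
 *
 * Commands that carry a payload go through ecore_mcp_cmd_and_union() with
 * the shmem union_data area instead.
 */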
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);

	return ECORE_SUCCESS;
}
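/* Note (added observation): both NVM helpers stage the payload in a local
 * union drv_union_data, so a single transaction is implicitly bounded by
 * sizeof(union drv_union_data) - in practice MCP_DRV_NVM_BUF_LEN bytes per
 * mailbox exchange. Larger transfers are split by the callers, as
 * ecore_mcp_nvm_read() at the end of this file does.
 */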
#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif
static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
{
	return (drv_role == DRV_ROLE_OS &&
		exist_drv_role == DRV_ROLE_PREBOOT) ||
	       (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
}
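/* In other words (added summary): an OS driver may force out a leftover
 * preboot driver, and a kdump driver may force out an OS driver; no other
 * combination may be forced.
 */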
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}
#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}
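/* Worked example (added): a build with only CONFIG_ECORE_L2 and
 * CONFIG_ECORE_SRIOV defined yields 0x1 | 0x2 = 0x3. The result is reported
 * to the MFW as drv_ver_1 in the load request below.
 */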
struct ecore_load_req_in_params {
	u32 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	union drv_union_data union_data_src, union_data_dst;
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc *p_load_req;
	struct load_rsp_stc *p_load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	p_load_req = &union_data_src.load_req;
	OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
	p_load_req->drv_ver_0 = p_in_params->drv_ver_0;
	p_load_req->drv_ver_1 = p_in_params->drv_ver_1;
	p_load_req->fw_ver = p_in_params->fw_ver;
	ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_ROLE,
			    p_in_params->drv_role);
	ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_LOCK_TO,
			    p_in_params->timeout_val);
	ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FORCE,
			    p_in_params->force_cmd);
	ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FLAGS0,
			    p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &union_data_src;
	mb_params.p_data_dst = &union_data_dst;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   p_load_req->drv_ver_0, p_load_req->drv_ver_1,
			   p_load_req->fw_ver, p_load_req->misc0,
			   ECORE_MFW_GET_FIELD(p_load_req->misc0,
					       LOAD_REQ_ROLE),
			   ECORE_MFW_GET_FIELD(p_load_req->misc0,
					       LOAD_REQ_LOCK_TO),
			   ECORE_MFW_GET_FIELD(p_load_req->misc0,
					       LOAD_REQ_FORCE),
			   ECORE_MFW_GET_FIELD(p_load_req->misc0,
					       LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		p_load_rsp = &union_data_dst.load_rsp;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   p_load_rsp->drv_ver_0, p_load_rsp->drv_ver_1,
			   p_load_rsp->fw_ver, p_load_rsp->misc0,
			   ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
					       LOAD_RSP_ROLE),
			   ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
					       LOAD_RSP_HSI),
			   ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
					       LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = p_load_rsp->drv_ver_0;
		p_out_params->exist_drv_ver_1 = p_load_rsp->drv_ver_1;
		p_out_params->exist_fw_ver = p_load_rsp->fw_ver;
		p_out_params->exist_drv_role =
		    ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
						   enum ecore_drv_role drv_role,
						   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};
static enum _ecore_status_t
ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
			enum ecore_load_req_force force_cmd,
			u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
				     &mfw_force_cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role)) {
			DP_INFO(p_hwfn,
				"A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			rc = ecore_get_mfw_force_cmd(p_hwfn,
						     ECORE_LOAD_REQ_FORCE_ALL,
						     &mfw_force_cmd);
			if (rc != ECORE_SUCCESS)
				return rc;

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
		DP_NOTICE(p_hwfn, false,
			  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}
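/* Flow summary (added): the load negotiation is at most two rounds - a first
 * request with the current HSI version, then, if the MFW answers
 * REFUSED_HSI_1 or REFUSED_REQUIRES_FORCE, a single retry with HSI = 1 or
 * with a force command. Any remaining refusal aborts the load.
 */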
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
				     &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}
static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw"
		   " [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store the total line speed in p_link->speed, since it
	 * changes again according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	OSAL_LINK_UPDATE(p_hwfn);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	struct eth_phy_cfg *p_phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	p_phy_cfg = &union_data.drv_phy_cfg;
	OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		p_phy_cfg->speed = params->speed.forced_speed;
	p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	p_phy_cfg->adv_speed = params->speed.advertised_speeds;
	p_phy_cfg->loopback_mode = params->loopback_mode;
	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
			   " adv_speed 0x%08x, loopback 0x%08x\n",
			   p_phy_cfg->speed, p_phy_cfg->pause,
			   p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return rc;
}
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
	    PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}
static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery"
			  " process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}
static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}
static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and correct the value to min 1 and max 100 if the
	 * limit isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
	    FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
	    FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
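/* Worked example (added; the exact bit positions are assumptions taken from
 * the mask/shift usage above, not restated from mcp_public.h): if the min-BW
 * field occupies bits [15:8] and the max-BW field bits [23:16] of config,
 * then a config word of 0x00320A00 decodes to bandwidth_min = 0x0A (10%) and
 * bandwidth_max = 0x32 (50%), both inside the accepted 1-100 range.
 */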
static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}
static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card"
		  " and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    u32 mdump_cmd, union drv_union_data *p_data_src,
		    union drv_union_data *p_data_dst, u32 *p_mcp_resp)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = mdump_cmd;
	mb_params.p_data_src = p_data_src;
	mb_params.p_data_dst = p_data_dst;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = mb_params.mcp_resp;
	if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_NOTICE(p_hwfn, false,
			  "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
			  mdump_cmd);
		rc = ECORE_INVAL;
	}

	return rc;
}
static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
				   OSAL_NULL, OSAL_NULL, &mcp_resp);
}

enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 epoch)
{
	union drv_union_data union_data;
	u32 mcp_resp;

	OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
				   &union_data, OSAL_NULL, &mcp_resp);
}

enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;

	p_hwfn->p_dev->mdump_en = true;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
				   OSAL_NULL, OSAL_NULL, &mcp_resp);
}
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct mdump_config_stc *p_mdump_config)
{
	union drv_union_data union_data;
	u32 mcp_resp;
	enum _ecore_status_t rc;

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
				 OSAL_NULL, &union_data, &mcp_resp);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
		return ECORE_NOTIMPL;

	if (mcp_resp != FW_MSG_CODE_OK) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			  mcp_resp);
		return ECORE_UNKNOWN_ERROR;
	}

	OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
		    sizeof(*p_mdump_config));

	return rc;
}
enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mdump_info *p_mdump_info)
{
	u32 addr, global_offsize, global_addr;
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
					global_addr +
					OFFSETOF(struct public_global,
						 mdump_reason));

	if (p_mdump_info->reason) {
		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_mdump_info->version = mdump_config.version;
		p_mdump_info->config = mdump_config.config;
		p_mdump_info->epoch = mdump_config.epoc;
		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
		p_mdump_info->valid_logs = mdump_config.valid_logs;

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
			   p_mdump_info->reason, p_mdump_info->version,
			   p_mdump_info->config, p_mdump_info->epoch,
			   p_mdump_info->num_of_logs,
			   p_mdump_info->valid_logs);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
				   OSAL_NULL, OSAL_NULL, &mcp_resp);
}
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Received a critical error notification from the MFW!\n");

	if (p_hwfn->p_dev->mdump_en) {
		DP_NOTICE(p_hwfn, false,
			  "Not acknowledging the notification to allow the MFW crash dump\n");
		p_hwfn->p_dev->mdump_en = false;
		return;
	}

	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects answers in BE, so force write in that format */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
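/* Layout note (added, inferred from the reads/writes above): the MFW mailbox
 * in shmem starts with one u32 length word, followed by
 * MFW_DRV_MSG_MAX_DWORDS(len) dwords of current messages (what
 * ecore_mcp_read_mb() fetches), followed by an equally sized ACK area -
 * which is why the ACK write offset above is
 * mfw_mb_addr + sizeof(u32) + msg_area_size + i * sizeof(u32).
 */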
enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   u32 *p_mfw_ver,
					   u32 *p_running_bundle_id)
{
	u32 global_offsize;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
		return ECORE_SUCCESS;
	}
#endif

	if (IS_VF(p_hwfn->p_dev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return ECORE_SUCCESS;
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return ECORE_INVAL;
		}
	}

	global_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
						       public_base,
						       PUBLIC_GLOBAL));
	*p_mfw_ver =
	    ecore_rd(p_hwfn, p_ptt,
		     SECTION_ADDR(global_offsize,
				  0) + OFFSETOF(struct public_global, mfw_ver));

	if (p_running_bundle_id != OSAL_NULL) {
		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
						SECTION_ADDR(global_offsize,
							     0) +
						OFFSETOF(struct public_global,
							 running_bundle_id));
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
					      u32 *p_media_type)
{
	struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
	struct ecore_ptt *p_ptt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	*p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
				 OFFSETOF(struct public_port, media_type));

	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
				 enum ecore_pci_personality *p_proto)
{
	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}

static enum _ecore_status_t
ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;

	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);
	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
			  struct public_func *p_info,
			  struct ecore_ptt *p_ptt,
			  enum ecore_pci_personality *p_proto)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
		    ECORE_SUCCESS)
			ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	default:
		rc = ECORE_INVAL;
	}

	return rc;
}
enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *info;
	struct public_func shmem_info;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				      &info->protocol) != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return ECORE_INVAL;
	}

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		/* TODO - are there protocols for which there's no MAC? */
		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
	}

	/* TODO - are these calculations true for BE machine? */
	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x"
		   " protocol %02x BW [%02x - %02x]"
		   " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
		   " node %lx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   (unsigned long)info->wwn_port,
		   (unsigned long)info->wwn_node, info->ovlan);

	return ECORE_SUCCESS;
}
struct ecore_mcp_link_params
*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct ecore_mcp_link_state
*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
		p_hwfn->mcp_info->link_output.link_up = true;
	}
#endif

	return &p_hwfn->mcp_info->link_output;
}

struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}
enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	OSAL_MSLEEP(1020);

	return rc;
}
const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->func_info;
}
enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   struct ecore_mcp_nvm_params *params)
{
	enum _ecore_status_t rc;

	switch (params->type) {
	case ECORE_MCP_NVM_RD:
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
					  params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
					  params->nvm_rd.buf_size,
					  params->nvm_rd.buf);
		break;
	case ECORE_MCP_CMD:
		rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
				   params->nvm_common.offset,
				   &params->nvm_common.resp,
				   &params->nvm_common.param);
		break;
	case ECORE_MCP_NVM_WR:
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
					  params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
					  params->nvm_wr.buf_size,
					  params->nvm_wr.buf);
		break;
	default:
		rc = ECORE_NOTIMPL;
		break;
	}

	return rc;
}
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u32 personalities)
{
	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
	struct public_func shmem_info;
	int i, count = 0, num_pfs;

	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);

	for (i = 0; i < num_pfs; i++) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));
		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
					      &protocol) != ECORE_SUCCESS)
			continue;

		if ((1 << ((u32)protocol)) & personalities)
			count++;
	}

	return count;
}
enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_flash_size)
{
	u32 flash_size;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
		return ECORE_INVAL;
	}
#endif

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return ECORE_SUCCESS;
}
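/* Worked example (added): MCP_BYTES_PER_MBIT_SHIFT is 17 because
 * 1 Mbit = 2^20 bits / 2^3 bits-per-byte = 2^17 bytes. A raw CFG4 field
 * value of 3 therefore decodes to 1 << (3 + 17) = 1 MiB of flash.
 */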
enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Avoid triggering a recovery since such a process"
			  " is already in progress\n");
		return ECORE_AGAIN;
	}

	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	enum _ecore_status_t rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return ECORE_SUCCESS;
	num *= p_hwfn->p_dev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
	    DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
	    DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			   &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
			  vf_id);
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 num_words, i;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		/* The driver name is expected to be in a big-endian format */
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 value, cpu_mode;

	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	/* Fail if the soft-halt bit is still set after clearing it */
	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
}
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   enum ecore_ov_client client)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (client) {
	case ECORE_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case ECORE_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case ECORE_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
enum _ecore_status_t
ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_ov_driver_state drv_state)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (drv_state) {
	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case ECORE_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case ECORE_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return 0;
}

enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return 0;
}

enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       enum ecore_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	enum _ecore_status_t rc;

	switch (mode) {
	case ECORE_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case ECORE_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case ECORE_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 mask_parities)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			   mask_parities, &resp, &param);

	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not ack mask parity request. Old MFW?\n");
		rc = ECORE_INVAL;
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	bytes_left = len;
	offset = 0;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		params.nvm_common.offset = (addr + offset) |
		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
					    FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

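/* Usage sketch (illustrative only): reading 4KB from the start of NVRAM.
 * The buffer and address are hypothetical; the function internally splits
 * the request into MCP_DRV_NVM_BUF_LEN sized mailbox transactions.
 *
 *	u8 nvm_buf[0x1000];
 *
 *	rc = ecore_mcp_nvm_read(p_dev, 0x0, nvm_buf, sizeof(nvm_buf));
 */
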
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
					u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &len;
	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
	    DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
	params.nvm_common.offset = addr;
	params.nvm_rd.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
						  u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

/* rc receives ECORE_INVAL as default parameter because
 * it might not enter the while loop if the len is 0
 */
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	u32 buf_idx, buf_size;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_WR;
	if (cmd == ECORE_PUT_FILE_DATA)
		params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
	else
		params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MCP_DRV_NVM_BUF_LEN);
		params.nvm_common.offset = ((buf_size <<
					     DRV_MB_PARAM_NVM_LEN_SHIFT)
					    | addr) + buf_idx;
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS ||
		    ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
		     (params.nvm_common.resp !=
		      FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (buf_idx % 0x1000 >
		    (buf_idx + buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		buf_idx += buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

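/* Usage sketch (illustrative only): how the put-file helpers are meant to be
 * chained - open the transaction, then stream the image through
 * ecore_mcp_nvm_write() with ECORE_PUT_FILE_DATA. "file_addr", "image" and
 * "image_len" are hypothetical caller state.
 *
 *	rc = ecore_mcp_nvm_put_file_begin(p_dev, file_addr);
 *	if (rc == ECORE_SUCCESS)
 *		rc = ecore_mcp_nvm_write(p_dev, ECORE_PUT_FILE_DATA,
 *					 file_addr, image, image_len);
 */
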
enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_wr.buf_size = len;
	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
	    DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
	params.nvm_common.offset = addr;
	params.nvm_wr.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
						   u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 bytes_left, bytes_to_copy, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);

	addr = offset;
	offset = 0;
	bytes_left = len;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((addr + offset) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	return ECORE_SUCCESS;
}

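/* Usage sketch (illustrative only): fetching the first 96 bytes of the SFP
 * EEPROM on port 0. The 0xA0 I2C address is the conventional SFP EEPROM
 * address - an assumption of this example, not something defined here.
 *
 *	u8 sfp_buf[96];
 *
 *	rc = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 0, 0xA0, 0,
 *				    sizeof(sfp_buf), sfp_buf);
 */
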
enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 port, u32 addr, u32 offset,
					     u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_idx, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MAX_I2C_TRANSACTION_SIZE);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((offset + buf_idx) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		buf_idx += buf_size;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
			   drv_mb_param, &rsp, gpio_val);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u16 gpio, u16 gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, param, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
		       (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_direction,
					 u32 *gpio_ctrl)
{
	u32 drv_mb_param = 0, rsp, val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
			   drv_mb_param, &rsp, &val);
	if (rc != ECORE_SUCCESS)
		return rc;

	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
			  DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
		     DRV_MB_PARAM_GPIO_CTRL_SHIFT;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

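/* Usage sketch (illustrative only): reading GPIO 5 and then querying its
 * direction/control attributes. The GPIO number is hypothetical.
 *
 *	u32 gpio_val, dir, ctrl;
 *
 *	if (ecore_mcp_gpio_read(p_hwfn, p_ptt, 5, &gpio_val) == ECORE_SUCCESS &&
 *	    ecore_mcp_gpio_info(p_hwfn, p_ptt, 5, &dir, &ctrl) == ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 *			   "gpio 5: val 0x%08x, direction %d, ctrl %d\n",
 *			   gpio_val, dir, ctrl);
 */
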
enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, num_images);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	struct bist_nvm_image_att *p_image_att, u32 image_index)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
				    DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
	params.nvm_common.offset |= (image_index <<
				     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);

	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
	params.nvm_rd.buf = (u32 *)p_image_att;

	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

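/* Usage sketch (illustrative only): walking all NVM images with the two BIST
 * helpers above - first query the image count, then fetch each attribute
 * record by index.
 *
 *	struct bist_nvm_image_att image_att;
 *	u32 num_images, i;
 *
 *	rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt,
 *						    &num_images);
 *	for (i = 0; rc == ECORE_SUCCESS && i < num_images; i++)
 *		rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
 *							   &image_att, i);
 */
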
enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc *p_mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mfw_temp_info = &union_data.temp_info;

	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32,
					      p_mfw_temp_info->num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = p_mfw_temp_info->sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_SHIFT;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_SHIFT;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_SHIFT;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_SHIFT;
	}

	return ECORE_SUCCESS;
}

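/* Usage sketch (illustrative only): dumping the sensors reported by the MFW.
 *
 *	struct ecore_temperature_info temp_info;
 *	u8 i;
 *
 *	if (ecore_mcp_get_temperature_info(p_hwfn, p_ptt, &temp_info) ==
 *	    ECORE_SUCCESS)
 *		for (i = 0; i < temp_info.num_sensors; i++)
 *			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 *				   "sensor %d: current temperature %d\n",
 *				   i, temp_info.sensors[i].current_temp);
 */
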
enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEM_ZERO(&params, sizeof(params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
	params.nvm_common.offset = 0;
	params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
	params.nvm_rd.buf_size = &buf_size;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
	    FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}

static enum resource_id_enum
ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case ECORE_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case ECORE_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case ECORE_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case ECORE_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case ECORE_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case ECORE_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case ECORE_MAC:
	case ECORE_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case ECORE_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case ECORE_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case ECORE_RDMA_CNQ_RAM:
	case ECORE_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case ECORE_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case ECORE_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define ECORE_RESC_ALLOC_VERSION_MAJOR 2
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION				\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

struct ecore_resc_alloc_in_params {
	u32 cmd;
	enum ecore_resources res_id;
	u32 resc_max_val;
};

struct ecore_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct ecore_resc_alloc_in_params *p_in_params,
			      struct ecore_resc_alloc_out_params *p_out_params)
{
	struct resource_info *p_mfw_resc_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	p_mfw_resc_info = &union_data.resource;
	OSAL_MEM_ZERO(p_mfw_resc_info, sizeof(*p_mfw_resc_info));

	p_mfw_resc_info->res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
	if (p_mfw_resc_info->res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       ecore_hw_get_resc_name(p_in_params->res_id));
		return ECORE_INVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		p_mfw_resc_info->size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &union_data;
	mb_params.p_data_dst = &union_data;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd, p_in_params->res_id,
		   ecore_hw_get_resc_name(p_in_params->res_id),
		   ECORE_MFW_GET_FIELD(mb_params.param,
				       DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   ECORE_MFW_GET_FIELD(mb_params.param,
				       DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = p_mfw_resc_info->size;
	p_out_params->resc_start = p_mfw_resc_info->offset;
	p_out_params->vf_resc_num = p_mfw_resc_info->vf_size;
	p_out_params->vf_resc_start = p_mfw_resc_info->vf_offset;
	p_out_params->flags = p_mfw_resc_info->flags;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
				       FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
				       FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num, p_out_params->resc_start,
		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
		   p_out_params->flags);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   enum ecore_resources res_id, u32 resc_max_val,
			   u32 *p_mcp_resp)
{
	struct ecore_resc_alloc_out_params out_params;
	struct ecore_resc_alloc_in_params in_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					   &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			enum ecore_resources res_id, u32 *p_mcp_resp,
			u32 *p_resc_num, u32 *p_resc_start)
{
	struct ecore_resc_alloc_out_params out_params;
	struct ecore_resc_alloc_in_params in_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					   &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return ECORE_SUCCESS;
}

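/* Usage sketch (illustrative only): querying the vport range the MFW
 * allocated to this PF. ECORE_VPORT is one of the ids translated by
 * ecore_mcp_get_mfw_res_id() above.
 *
 *	u32 mcp_resp, resc_num, resc_start;
 *
 *	rc = ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_VPORT, &mcp_resp,
 *				     &resc_num, &resc_start);
 *	if (rc == ECORE_SUCCESS && mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "vports: num %d, start %d\n",
 *			   resc_num, resc_start);
 */
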
enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			     &mcp_resp, &mcp_param);
}

static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 param, u32 *p_mcp_resp,
						   u32 *p_mcp_param)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			   p_mcp_resp, p_mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn, false,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return ECORE_INVAL;
	}

	return rc;
}

enum _ecore_status_t
__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	switch (p_params->timeout) {
	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case ECORE_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
					      RESOURCE_CMD_RSP_OWNER);
	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	enum _ecore_status_t rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
					DIV_ROUND_UP(p_params->retry_interval,
						     1000);

				OSAL_MSLEEP(retry_interval_in_ms);
			} else {
				OSAL_UDELAY(p_params->retry_interval);
			}
		}

		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

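/* Usage sketch (illustrative only): the lock/unlock pair above wrapped
 * around a critical section. The resource id and retry policy are
 * hypothetical caller choices.
 *
 *	struct ecore_resc_unlock_params unlock_params;
 *	struct ecore_resc_lock_params lock_params;
 *
 *	OSAL_MEM_ZERO(&lock_params, sizeof(lock_params));
 *	lock_params.resource = resc_id;
 *	lock_params.timeout = ECORE_MCP_RESC_LOCK_TO_DEFAULT;
 *	lock_params.retry_num = 10;
 *	lock_params.retry_interval = 10000;
 *	lock_params.sleep_b4_retry = true;
 *	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
 *	if (rc == ECORE_SUCCESS && lock_params.b_granted) {
 *		(critical section)
 *		OSAL_MEM_ZERO(&unlock_params, sizeof(unlock_params));
 *		unlock_params.resource = resc_id;
 *		rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 *	}
 */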