/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
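
/* Sanity math for the two retry budgets above: a response is polled once per
 * CHIP_MCP_RESP_ITER_US (10 usec), so 500 * 1000 iterations give the 5 sec
 * budget of ECORE_DRV_MB_MAX_RETRIES, and 50 * 1000 iterations give the
 * 500 msec budget of ECORE_MCP_RESET_RETRIES.
 */
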
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
        ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
                 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
        ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
        DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
                     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
        DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
                     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
        DRV_ID_PDA_COMP_VER_SHIFT)
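
/* PDA_COMP packs the FW major version into the low byte and the minor version
 * into the next byte, then shifts the pair into the PDA-compatibility field
 * (DRV_ID_PDA_COMP_VER_SHIFT) of the LOAD_REQ parameter dword.
 */
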
#define MCP_BYTES_PER_MBIT_SHIFT 17
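
/* 1 Mbit (2^20 bits) is 2^17 bytes, hence the shift of 17 used when
 * converting the flash-size exponent read from MCP_REG_NVM_CFG4 into bytes
 * in ecore_mcp_get_flash_size() below.
 */
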
/* Load-phase accounting used by the emulation paths below - see
 * ecore_mcp_mf_workaround() and the CHIP_REV_IS_EMUL branch of
 * ecore_mcp_cmd().
 */
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
                return false;
        return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PORT);
        u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

        p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
                                                   MFW_PORT(p_hwfn));
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "port_addr = 0x%x, port_id 0x%02x\n",
                   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
        u32 tmp, i;

        if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
                return;

        if (!p_hwfn->mcp_info->public_base)
                return;

        for (i = 0; i < length; i++) {
                tmp = ecore_rd(p_hwfn, p_ptt,
                               p_hwfn->mcp_info->mfw_mb_addr +
                               (i << 2) + sizeof(u32));

                ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
                        OSAL_BE32_TO_CPU(tmp);
        }
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
        if (p_hwfn->mcp_info) {
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
        }
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
                p_info->public_base = 0;
                return ECORE_INVAL;
        }

        p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return ECORE_INVAL;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x,"
                   " mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address */
        mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
                                              p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
                             DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
                                DRV_PULSE_SEQ_MASK;

        p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
                                         MISCS_REG_GENERIC_POR_0);

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *p_info;
        u32 size;

        /* Allocate mcp_info structure */
        p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
                                       sizeof(*p_hwfn->mcp_info));
        if (!p_hwfn->mcp_info)
                goto err;
        p_info = p_hwfn->mcp_info;

        if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
                /* Do not free mcp_info here, since public_base indicates
                 * that the MCP is not initialized
                 */
                return ECORE_SUCCESS;
        }

        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
        p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
        p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
        if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
                goto err;

        /* Initialize the MFW spinlock */
        OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
        OSAL_SPIN_LOCK_INIT(&p_info->lock);

        return ECORE_SUCCESS;

err:
        DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
        ecore_mcp_free(p_hwfn);
        return ECORE_NOMEM;
}

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when the [UN]LOAD_REQ commands are sent), the
 * single access is achieved by setting a blocking flag, which causes the
 * mailbox commands of other competing contexts to fail.
 */
static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
                                              u32 cmd)
{
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

        /* The spinlock shouldn't be acquired when the mailbox command is
         * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
         * pending [UN]LOAD_REQ command of another PF together with a spinlock
         * (i.e. interrupts are disabled) - can lead to a deadlock.
         * It is assumed that for a single PF, no other mailbox commands can be
         * sent from another context while sending LOAD_REQ, and that any
         * parallel commands to UNLOAD_REQ can be cancelled.
         */
        if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
                p_hwfn->mcp_info->block_mb_sending = false;

        if (p_hwfn->mcp_info->block_mb_sending) {
                DP_NOTICE(p_hwfn, false,
                          "Trying to send a MFW mailbox command [0x%x]"
                          " in parallel to [UN]LOAD_REQ. Aborting.\n",
                          cmd);
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
                return ECORE_BUSY;
        }

        if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
                p_hwfn->mcp_info->block_mb_sending = true;
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
        }

        return ECORE_SUCCESS;
}

static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
{
        if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}
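
/* Illustrative pairing of the two helpers above (a sketch, not an actual
 * caller in this file); every mailbox access is bracketed by a successful
 * lock and a matching unlock with the same command:
 *
 *        rc = ecore_mcp_mb_lock(p_hwfn, cmd);
 *        if (rc != ECORE_SUCCESS)
 *                return rc;
 *        (write drv_mb_param/drv_mb_header, poll fw_mb_header)
 *        ecore_mcp_mb_unlock(p_hwfn, cmd);
 */
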
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
        u32 delay = CHIP_MCP_RESP_ITER_US;
        u32 org_mcp_reset_seq, cnt = 0;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;

        /* Ensure that only a single thread is accessing the mailbox at a
         * certain time.
         */
        rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Set drv command along with the updated sequence */
        org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                /* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
        } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
                                                MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < ECORE_MCP_RESET_RETRIES));

        if (org_mcp_reset_seq !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = ECORE_AGAIN;
        }

        ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

        return rc;
}

static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt,
                                             u32 cmd, u32 param,
                                             u32 *o_mcp_resp,
                                             u32 *o_mcp_param)
{
        u32 delay = CHIP_MCP_RESP_ITER_US;
        u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
        u32 seq, cnt = 1, actual_mb_seq;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
        /* There is a built-in delay of 100usec in each MFW response read */
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                max_retries /= 10;

        /* Get actual driver mailbox sequence */
        actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
                        DRV_MSG_SEQ_NUMBER_MASK;

        /* Use MCP history register to check if MCP reset occurred between
         * init time and now.
         */
        if (p_hwfn->mcp_info->mcp_hist !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
                ecore_load_mcp_offsets(p_hwfn, p_ptt);
                ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
        seq = ++p_hwfn->mcp_info->drv_mb_seq;

        /* Set drv param */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

        /* Set drv command along with the updated sequence */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

                /* Give the FW up to 5 seconds (500 * 1000 * 10 usec) */
        } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
                 (cnt++ < max_retries));

        /* Is this a reply to our command? */
        if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
                *o_mcp_resp &= FW_MSG_CODE_MASK;
                /* Get the MCP param */
                *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
        } else {
                /* FW BUG! */
                DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
                       cmd, param);
                *o_mcp_resp = 0;
                rc = ECORE_AGAIN;
                ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
        }

        return rc;
}

static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct ecore_mcp_mb_params *p_mb_params)
{
        union drv_union_data union_data;
        u32 union_data_addr;
        enum _ecore_status_t rc;

        /* MCP not initialized */
        if (!ecore_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
                return ECORE_BUSY;
        }

        if (p_mb_params->data_src_size > sizeof(union_data) ||
            p_mb_params->data_dst_size > sizeof(union_data)) {
                DP_ERR(p_hwfn,
                       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
                       p_mb_params->data_src_size, p_mb_params->data_dst_size,
                       sizeof(union_data));
                return ECORE_INVAL;
        }

        union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                          OFFSETOF(struct public_drv_mb, union_data);

        /* Ensure that only a single thread is accessing the mailbox at a
         * certain time.
         */
        rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
        if (rc != ECORE_SUCCESS)
                return rc;

        OSAL_MEM_ZERO(&union_data, sizeof(union_data));
        if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
                OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
                            p_mb_params->data_src_size);
        ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
                        sizeof(union_data));

        rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
                              p_mb_params->param, &p_mb_params->mcp_resp,
                              &p_mb_params->mcp_param);

        if (p_mb_params->p_data_dst != OSAL_NULL &&
            p_mb_params->data_dst_size)
                ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
                                  union_data_addr, p_mb_params->data_dst_size);

        ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

        return rc;
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
                                   u32 *o_mcp_resp, u32 *o_mcp_param)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
                        loaded--;
                        loaded_port[p_hwfn->port_id]--;
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
                                   loaded);
                }
                return ECORE_SUCCESS;
        }

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return ECORE_SUCCESS;
}
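
/* Sketch of a typical caller (hypothetical - the actual callers live
 * throughout the driver): issue a simple mailbox command and inspect both
 * outputs, the way this file itself does for e.g. DRV_MSG_CODE_NIG_DRAIN
 * below:
 *
 *        u32 resp = 0, param = 0;
 *
 *        rc = ecore_mcp_cmd(p_hwfn, p_ptt, cmd, cmd_param, &resp, &param);
 *        if (rc != ECORE_SUCCESS)
 *                return rc;
 *        (resp holds the FW_MSG_CODE_* response, param the response param)
 */
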
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 cmd,
                                          u32 param,
                                          u32 *o_mcp_resp,
                                          u32 *o_mcp_param,
                                          u32 i_txn_size, u32 *i_buf)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        mb_params.p_data_src = i_buf;
        mb_params.data_src_size = (u8)i_txn_size;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 cmd,
                                          u32 param,
                                          u32 *o_mcp_resp,
                                          u32 *o_mcp_param,
                                          u32 *o_txn_size, u32 *o_buf)
{
        struct ecore_mcp_mb_params mb_params;
        u8 raw_data[MCP_DRV_NVM_BUF_LEN];
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        mb_params.p_data_dst = raw_data;

        /* Use the maximal value since the actual one is part of the response */
        mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        *o_txn_size = *o_mcp_param;
        OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

        return ECORE_SUCCESS;
}
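
/* Note on the read path above: the MFW returns the actual transaction size in
 * the mcp_param output, and at most MCP_DRV_NVM_BUF_LEN bytes are copied into
 * o_buf, so a caller's buffer must be able to hold that maximum.
 */
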
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
                                    u32 *p_load_code)
{
        static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

        if (!loaded)
                load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
        else if (!loaded_port[p_hwfn->port_id])
                load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
        else
                load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

        /* On CMT, always tell that it's engine */
        if (p_hwfn->p_dev->num_hwfns > 1)
                load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

        *p_load_code = load_phase;
        loaded++;
        loaded_port[p_hwfn->port_id]++;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
                   *p_load_code, loaded, p_hwfn->port_id,
                   loaded_port[p_hwfn->port_id]);
}

static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
{
        return (drv_role == DRV_ROLE_OS &&
                exist_drv_role == DRV_ROLE_PREBOOT) ||
               (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
                                                      struct ecore_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        enum _ecore_status_t rc;

        rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
                           &resp, &param);
        if (rc != ECORE_SUCCESS)
                DP_NOTICE(p_hwfn, false,
                          "Failed to send cancel load request, rc = %d\n", rc);

        return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX     (0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX  (0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX   (0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX  (0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX   (0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX  (0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX    (0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
        u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
        config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
        config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
        config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
        config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
        config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
        config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
        config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

        return config_bitmap;
}
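
/* The bitmap computed above is advertised to the MFW as drv_ver_1 of the
 * LOAD_REQ flow (see ecore_mcp_load_req() below), so the firmware can tell
 * which protocol engines this ecore build was compiled with.
 */
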
struct ecore_load_req_in_params {
        u32 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT  0
#define ECORE_LOAD_REQ_HSI_VER_1        1
        u32 drv_ver_0;
        u32 drv_ver_1;
        u32 fw_ver;
        u8 drv_role;
        u8 timeout_val;
        u8 force_cmd;
        bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
        u32 load_code;
        u32 exist_drv_ver_0;
        u32 exist_drv_ver_1;
        u32 exist_fw_ver;
        u8 exist_drv_role;
        u8 mfw_hsi_ver;
        bool drv_exists;
};

static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                     struct ecore_load_req_in_params *p_in_params,
                     struct ecore_load_req_out_params *p_out_params)
{
        struct ecore_mcp_mb_params mb_params;
        struct load_req_stc load_req;
        struct load_rsp_stc load_rsp;
        u32 hsi_ver;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&load_req, sizeof(load_req));
        load_req.drv_ver_0 = p_in_params->drv_ver_0;
        load_req.drv_ver_1 = p_in_params->drv_ver_1;
        load_req.fw_ver = p_in_params->fw_ver;
        ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
                            p_in_params->drv_role);
        ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
                            p_in_params->timeout_val);
        ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
                            p_in_params->force_cmd);
        ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
                            p_in_params->avoid_eng_reset);

        hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
                  DRV_ID_MCP_HSI_VER_CURRENT :
                  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
        mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
        mb_params.p_data_src = &load_req;
        mb_params.data_src_size = sizeof(load_req);
        mb_params.p_data_dst = &load_rsp;
        mb_params.data_dst_size = sizeof(load_rsp);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
                   mb_params.param,
                   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
                   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
                   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
                   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

        if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
                           load_req.drv_ver_0, load_req.drv_ver_1,
                           load_req.fw_ver, load_req.misc0,
                           ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
                           ECORE_MFW_GET_FIELD(load_req.misc0,
                                               LOAD_REQ_LOCK_TO),
                           ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
                           ECORE_MFW_GET_FIELD(load_req.misc0,
                                               LOAD_REQ_FLAGS0));

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to send load request, rc = %d\n", rc);
                return rc;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
        p_out_params->load_code = mb_params.mcp_resp;

        if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
            p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
                           load_rsp.drv_ver_0, load_rsp.drv_ver_1,
                           load_rsp.fw_ver, load_rsp.misc0,
                           ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
                           ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
                           ECORE_MFW_GET_FIELD(load_rsp.misc0,
                                               LOAD_RSP_FLAGS0));

                p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
                p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
                p_out_params->exist_fw_ver = load_rsp.fw_ver;
                p_out_params->exist_drv_role =
                        ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
                p_out_params->mfw_hsi_ver =
                        ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
                p_out_params->drv_exists =
                        ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
                        LOAD_RSP_FLAGS0_DRV_EXISTS;
        }

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
                                                   enum ecore_drv_role drv_role,
                                                   u8 *p_mfw_drv_role)
{
        switch (drv_role) {
        case ECORE_DRV_ROLE_OS:
                *p_mfw_drv_role = DRV_ROLE_OS;
                break;
        case ECORE_DRV_ROLE_KDUMP:
                *p_mfw_drv_role = DRV_ROLE_KDUMP;
                break;
        default:
                DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

enum ecore_load_req_force {
        ECORE_LOAD_REQ_FORCE_NONE,
        ECORE_LOAD_REQ_FORCE_PF,
        ECORE_LOAD_REQ_FORCE_ALL,
};

static enum _ecore_status_t
ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
                        enum ecore_load_req_force force_cmd,
                        u8 *p_mfw_force_cmd)
{
        switch (force_cmd) {
        case ECORE_LOAD_REQ_FORCE_NONE:
                *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
                break;
        case ECORE_LOAD_REQ_FORCE_PF:
                *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
                break;
        case ECORE_LOAD_REQ_FORCE_ALL:
                *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
                break;
        default:
                DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        struct ecore_load_req_params *p_params)
{
        struct ecore_load_req_out_params out_params;
        struct ecore_load_req_in_params in_params;
        u8 mfw_drv_role, mfw_force_cmd;
        enum _ecore_status_t rc;

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
                return ECORE_SUCCESS;
        }

        OSAL_MEM_ZERO(&in_params, sizeof(in_params));
        in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
        in_params.drv_ver_0 = ECORE_VERSION;
        in_params.drv_ver_1 = ecore_get_config_bitmap();
        in_params.fw_ver = STORM_FW_VERSION;
        rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
        if (rc != ECORE_SUCCESS)
                return rc;

        in_params.drv_role = mfw_drv_role;
        in_params.timeout_val = p_params->timeout_val;
        rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
                                     &mfw_force_cmd);
        if (rc != ECORE_SUCCESS)
                return rc;

        in_params.force_cmd = mfw_force_cmd;
        in_params.avoid_eng_reset = p_params->avoid_eng_reset;

        OSAL_MEM_ZERO(&out_params, sizeof(out_params));
        rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* First handle cases where another load request should/might be sent:
         * - MFW expects the old interface [HSI version = 1]
         * - MFW responds that a force load request is required
         */
        if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
                DP_INFO(p_hwfn,
                        "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

                /* The previous load request set the mailbox blocking */
                p_hwfn->mcp_info->block_mb_sending = false;

                in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
                OSAL_MEM_ZERO(&out_params, sizeof(out_params));
                rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
                                          &out_params);
                if (rc != ECORE_SUCCESS)
                        return rc;
        } else if (out_params.load_code ==
                   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
                /* The previous load request set the mailbox blocking */
                p_hwfn->mcp_info->block_mb_sending = false;

                if (ecore_mcp_can_force_load(in_params.drv_role,
                                             out_params.exist_drv_role)) {
                        DP_INFO(p_hwfn,
                                "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
                                out_params.exist_drv_role,
                                out_params.exist_fw_ver,
                                out_params.exist_drv_ver_0,
                                out_params.exist_drv_ver_1);

                        rc = ecore_get_mfw_force_cmd(p_hwfn,
                                                     ECORE_LOAD_REQ_FORCE_ALL,
                                                     &mfw_force_cmd);
                        if (rc != ECORE_SUCCESS)
                                return rc;

                        in_params.force_cmd = mfw_force_cmd;
                        OSAL_MEM_ZERO(&out_params, sizeof(out_params));
                        rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
                                                  &out_params);
                        if (rc != ECORE_SUCCESS)
                                return rc;
                } else {
                        DP_NOTICE(p_hwfn, false,
                                  "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Aborting to avoid disrupting the active PFs.\n",
                                  out_params.exist_drv_role,
                                  out_params.exist_fw_ver,
                                  out_params.exist_drv_ver_0,
                                  out_params.exist_drv_ver_1);

                        ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
                        return ECORE_BUSY;
                }
        }

        /* Now handle the other types of responses.
         * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
         * expected here after the additional revised load requests were sent.
         */
        switch (out_params.load_code) {
        case FW_MSG_CODE_DRV_LOAD_ENGINE:
        case FW_MSG_CODE_DRV_LOAD_PORT:
        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
                    out_params.drv_exists) {
                        /* The role and fw/driver version match, but the PF is
                         * already loaded and has not been unloaded gracefully.
                         * This is unexpected since a quasi-FLR request was
                         * previously sent as part of ecore_hw_prepare().
                         */
                        DP_NOTICE(p_hwfn, false,
                                  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
                        return ECORE_INVAL;
                }
                break;
        case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
        case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
        case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
        case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
                DP_NOTICE(p_hwfn, false,
                          "MFW refused a load request [resp 0x%08x]. Aborting.\n",
                          out_params.load_code);
                return ECORE_BUSY;
        default:
                DP_NOTICE(p_hwfn, false,
                          "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
                          out_params.load_code);
                return ECORE_BUSY;
        }

        p_params->load_code = out_params.load_code;

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
                                           struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_mb_params mb_params;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

        return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PATH);
        u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        u32 path_addr = SECTION_ADDR(mfw_path_offsize,
                                     ECORE_PATH_ID(p_hwfn));
        u32 disabled_vfs[VF_MAX_STATIC / 32];
        int i;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Reading Disabled VF information from [offset %08x],"
                   " path_addr %08x\n",
                   mfw_path_offsize, path_addr);

        for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
                disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
                                           path_addr +
                                           OFFSETOF(struct public_path,
                                                    mcp_vf_disabled) +
                                           sizeof(u32) * i);
                DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
                           "FLR-ed VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
        }

        if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
                OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 *vfs_to_ack)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_func_offsize,
                                     MCP_PF_ID(p_hwfn));
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;
        int i;

        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
                           "Acking VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
        mb_params.p_data_src = vfs_to_ack;
        mb_params.data_src_size = VF_MAX_STATIC / 8;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
                                     &mb_params);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to pass ACK for VF flr to MFW\n");
                return ECORE_TIMEOUT;
        }

        /* TMP - clear the ACK bits; should be done by MFW */
        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                ecore_wr(p_hwfn, p_ptt,
                         func_addr +
                         OFFSETOF(struct public_func, drv_ack_vf_disabled) +
                         i * sizeof(u32), 0);

        return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
{
        u32 transceiver_state;

        transceiver_state = ecore_rd(p_hwfn, p_ptt,
                                     p_hwfn->mcp_info->port_addr +
                                     OFFSETOF(struct public_port,
                                              transceiver_data));

        DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
                   "Received transceiver state update [0x%08x] from mfw"
                   " [Addr 0x%x]\n",
                   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
                                            OFFSETOF(struct public_port,
                                                     transceiver_data)));

        transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);

        if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
                DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
        else
                DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}

static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         bool b_reset)
{
        struct ecore_mcp_link_state *p_link;
        u8 max_bw, min_bw;
        u32 status = 0;

        p_link = &p_hwfn->mcp_info->link_output;
        OSAL_MEMSET(p_link, 0, sizeof(*p_link));
        if (!b_reset) {
                status = ecore_rd(p_hwfn, p_ptt,
                                  p_hwfn->mcp_info->port_addr +
                                  OFFSETOF(struct public_port, link_status));
                DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
                           "Received link update [0x%08x] from mfw"
                           " [Addr 0x%x]\n",
                           status, (u32)(p_hwfn->mcp_info->port_addr +
                                         OFFSETOF(struct public_port,
                                                  link_status)));
        } else {
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Resetting link indications\n");
                return;
        }

        if (p_hwfn->b_drv_link_init)
                p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
        else
                p_link->link_up = false;

        p_link->full_duplex = true;
        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
                p_link->speed = 100000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
                p_link->speed = 50000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
                p_link->speed = 40000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
                p_link->speed = 25000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
                p_link->speed = 20000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
                p_link->speed = 10000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
                p_link->full_duplex = false;
                /* Fall-through */
        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
                p_link->speed = 1000;
                break;
        default:
                p_link->speed = 0;
        }

        /* We never store the total line speed, as p_link->speed changes
         * again according to the bandwidth allocation.
         */
        if (p_link->link_up && p_link->speed)
                p_link->line_speed = p_link->speed;
        else
                p_link->line_speed = 0;

        max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
        min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

        /* Max bandwidth configuration */
        __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
                                           p_link, max_bw);

        /* Min bandwidth configuration */
        __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                           p_link, min_bw);
        ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
                                              p_link->min_pf_rate);

        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
        p_link->parallel_detection = !!(status &
                                        LINK_STATUS_PARALLEL_DETECTION_USED);
        p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_10G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_20G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_25G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_40G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_50G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
                ECORE_LINK_PARTNER_SPEED_100G : 0;

        p_link->partner_tx_flow_ctrl_en =
                !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
        p_link->partner_rx_flow_ctrl_en =
                !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

        switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
        case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
                break;
        default:
                p_link->partner_adv_pause = 0;
        }

        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

        OSAL_LINK_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt, bool b_up)
{
        struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        struct ecore_mcp_mb_params mb_params;
        struct eth_phy_cfg phy_cfg;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 cmd;

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                return ECORE_SUCCESS;

        /* Set the shmem configuration according to params */
        OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        if (!params->speed.autoneg)
                phy_cfg.speed = params->speed.forced_speed;
        phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
        phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
        phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
        phy_cfg.adv_speed = params->speed.advertised_speeds;
        phy_cfg.loopback_mode = params->loopback_mode;

        p_hwfn->b_drv_link_init = b_up;

        if (b_up)
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
                           " adv_speed 0x%08x, loopback 0x%08x\n",
                           phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
                           phy_cfg.loopback_mode);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.p_data_src = &phy_cfg;
        mb_params.data_src_size = sizeof(phy_cfg);
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

        /* if mcp fails to respond we must abort */
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* Reset the link status if needed */
        if (!b_up)
                ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);

        return rc;
}
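
/* Typical bring-up sequence (an illustrative sketch; the real callers live
 * outside this file, and adv_speeds is a placeholder value): fill the desired
 * parameters in mcp_info->link_input, then request the PHY init:
 *
 *        struct ecore_mcp_link_params *p_params =
 *                ecore_mcp_get_link_params(p_hwfn);
 *
 *        p_params->speed.autoneg = true;
 *        p_params->speed.advertised_speeds = adv_speeds;
 *        rc = ecore_mcp_set_link(p_hwfn, p_ptt, true);
 */
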
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt)
{
        u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

        /* TODO - Add support for VFs */
        if (IS_VF(p_hwfn->p_dev))
                return -1;

        path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                                 PUBLIC_PATH);
        path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
        path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

        proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
                                 path_addr +
                                 OFFSETOF(struct public_path, process_kill)) &
                        PROCESS_KILL_COUNTER_MASK;

        return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        u32 proc_kill_cnt;

        /* Prevent possible attentions/interrupts during the recovery handling
         * and till its load phase, during which they will be re-enabled.
         */
        ecore_int_igu_disable_int(p_hwfn, p_ptt);

        DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

        /* The following operations should be done once, and thus in CMT mode
         * are carried out by only the first HW function.
         */
        if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
                return;

        if (p_dev->recov_in_prog) {
                DP_NOTICE(p_hwfn, false,
                          "Ignoring the indication since a recovery"
                          " process is already in progress\n");
                return;
        }

        p_dev->recov_in_prog = true;

        proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

        OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          enum MFW_DRV_MSG_TYPE type)
{
        enum ecore_mcp_protocol_type stats_type;
        union ecore_mcp_protocol_stats stats;
        struct ecore_mcp_mb_params mb_params;
        u32 hsi_param;
        enum _ecore_status_t rc;

        switch (type) {
        case MFW_DRV_MSG_GET_LAN_STATS:
                stats_type = ECORE_MCP_LAN_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
                break;
        default:
                DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
                return;
        }

        OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_GET_STATS;
        mb_params.param = hsi_param;
        mb_params.p_data_src = &stats;
        mb_params.data_src_size = sizeof(stats);
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
                                    struct public_func *p_shmem_info)
{
        struct ecore_mcp_function_info *p_info;

        p_info = &p_hwfn->mcp_info->func_info;

        /* TODO - bandwidth min/max should have valid values of 1-100,
         * as well as some indication that the feature is disabled.
         * Until MFW/qlediag enforce those limitations, assume there is
         * always a limit, and correct the value to min `1' and max `100'
         * if the limit isn't in range.
         */
        p_info->bandwidth_min = (p_shmem_info->config &
                                 FUNC_MF_CFG_MIN_BW_MASK) >>
                                FUNC_MF_CFG_MIN_BW_SHIFT;
        if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth minimum out of bounds [%02x]. Set to 1\n",
                        p_info->bandwidth_min);
                p_info->bandwidth_min = 1;
        }

        p_info->bandwidth_max = (p_shmem_info->config &
                                 FUNC_MF_CFG_MAX_BW_MASK) >>
                                FUNC_MF_CFG_MAX_BW_SHIFT;
        if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth maximum out of bounds [%02x]. Set to 100\n",
                        p_info->bandwidth_max);
                p_info->bandwidth_max = 100;
        }
}

static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt,
                                    struct public_func *p_data,
                                    int pfid)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
        u32 i, size;

        OSAL_MEM_ZERO(p_data, sizeof(*p_data));

        size = OSAL_MIN_T(u32, sizeof(*p_data),
                          SECTION_SIZE(mfw_path_offsize));
        for (i = 0; i < size / sizeof(u32); i++)
                ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
                                              func_addr + (i << 2));

        return size;
}

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_function_info *p_info;
        struct public_func shmem_info;
        u32 resp = 0, param = 0;

        ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

        ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

        p_info = &p_hwfn->mcp_info->func_info;

        ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

        ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

        /* Acknowledge the MFW */
        ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
                      &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt)
{
        /* A single notification should be sent to upper driver in CMT mode */
        if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
                return;

        DP_NOTICE(p_hwfn, false,
                  "Fan failure was detected on the network interface card"
                  " and it's going to be shut down.\n");

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}

struct ecore_mdump_cmd_params {
        u32 cmd;
        void *p_data_src;
        u8 data_src_size;
        void *p_data_dst;
        u8 data_dst_size;
        u32 mcp_resp;
};

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
        mb_params.param = p_mdump_cmd_params->cmd;
        mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
        mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
        mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
        mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
        if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
                DP_NOTICE(p_hwfn, false,
                          "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
                          p_mdump_cmd_params->cmd);
                rc = ECORE_INVAL;
        }

        return rc;
}

static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt,
                                                u32 epoch)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
        mdump_cmd_params.p_data_src = &epoch;
        mdump_cmd_params.data_src_size = sizeof(epoch);

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
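
/* The ecore_mcp_mdump_*() entry points above and below all share one pattern:
 * fill a struct ecore_mdump_cmd_params with the DRV_MSG_CODE_MDUMP_*
 * sub-command (plus an optional source/destination buffer) and funnel it
 * through ecore_mcp_mdump_cmd(), which wraps the DRV_MSG_CODE_MDUMP_CMD
 * mailbox command.
 */
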
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                           struct mdump_config_stc *p_mdump_config)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
        mdump_cmd_params.p_data_dst = p_mdump_config;
        mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

        rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (mdump_cmd_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
                DP_INFO(p_hwfn,
                        "The mdump command is not supported by the MFW\n");
                return ECORE_NOTIMPL;
        }

        if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
                          mdump_cmd_params.mcp_resp);
                rc = ECORE_UNKNOWN_ERROR;
        }

        return rc;
}

enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         struct ecore_mdump_info *p_mdump_info)
{
        u32 addr, global_offsize, global_addr;
        struct mdump_config_stc mdump_config;
        enum _ecore_status_t rc;

        OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

        addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                    PUBLIC_GLOBAL);
        global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        global_addr = SECTION_ADDR(global_offsize, 0);
        p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
                                        global_addr +
                                        OFFSETOF(struct public_global,
                                                 mdump_reason));

        if (p_mdump_info->reason) {
                rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_mdump_info->version = mdump_config.version;
                p_mdump_info->config = mdump_config.config;
                p_mdump_info->epoch = mdump_config.epoc;
                p_mdump_info->num_of_logs = mdump_config.num_of_logs;
                p_mdump_info->valid_logs = mdump_config.valid_logs;

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
                           p_mdump_info->reason, p_mdump_info->version,
                           p_mdump_info->config, p_mdump_info->epoch,
                           p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
        } else {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MFW mdump info: reason %d\n", p_mdump_info->reason);
        }

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
                                            struct ecore_ptt *p_ptt)
{
        /* In CMT mode - no need for more than a single acknowledgment to the
         * MFW, and no more than a single notification to the upper driver.
         */
        if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
                return;

        DP_NOTICE(p_hwfn, false,
                  "Received a critical error notification from the MFW!\n");

        if (p_hwfn->p_dev->mdump_en) {
                DP_NOTICE(p_hwfn, false,
                          "Not acknowledging the notification to allow the MFW crash dump\n");
                p_hwfn->p_dev->mdump_en = false;
                return;
        }

        ecore_mcp_mdump_ack(p_hwfn, p_ptt);
        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}

enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *info = p_hwfn->mcp_info;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        bool found = false;
        u16 i;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

        /* Read Messages from MFW */
        ecore_mcp_read_mb(p_hwfn, p_ptt);

        /* Compare current messages to old ones */
        for (i = 0; i < info->mfw_mb_length; i++) {
                if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
                        continue;

                found = true;

                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
                           i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

                switch (i) {
                case MFW_DRV_MSG_LINK_CHANGE:
                        ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
                        break;
                case MFW_DRV_MSG_VF_DISABLED:
                        ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_LLDP_DATA_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_REMOTE_LLDP_MIB);
                        break;
                case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_REMOTE_MIB);
                        break;
                case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_OPERATIONAL_MIB);
                        break;
                case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
                        ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_ERROR_RECOVERY:
                        ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_GET_LAN_STATS:
                case MFW_DRV_MSG_GET_FCOE_STATS:
                case MFW_DRV_MSG_GET_ISCSI_STATS:
                case MFW_DRV_MSG_GET_RDMA_STATS:
                        ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
                        break;
                case MFW_DRV_MSG_BW_UPDATE:
                        ecore_mcp_update_bw(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_FAILURE_DETECTED:
                        ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
                        ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
                        break;
                default:
                        DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
                        rc = ECORE_INVAL;
                }
        }

        /* ACK everything */
        for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
                OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

                /* The MFW expects the answer in BE, so force the write to be
                 * in that format.
                 */
                ecore_wr(p_hwfn, p_ptt,
                         info->mfw_mb_addr + sizeof(u32) +
                         MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
                         sizeof(u32) + i * sizeof(u32), val);
        }

        if (!found) {
                DP_NOTICE(p_hwfn, false,
                          "Received an MFW message indication but no"
                          " new message!\n");
                rc = ECORE_INVAL;
        }

        /* Copy the new mfw messages into the shadow */
        OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

        return rc;
}

enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
                                           struct ecore_ptt *p_ptt,
                                           u32 *p_mfw_ver,
                                           u32 *p_running_bundle_id)
{
        u32 global_offsize;

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
                return ECORE_SUCCESS;
        }

        if (IS_VF(p_hwfn->p_dev)) {
                if (p_hwfn->vf_iov_info) {
                        struct pfvf_acquire_resp_tlv *p_resp;

                        p_resp = &p_hwfn->vf_iov_info->acquire_resp;
                        *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
                        return ECORE_SUCCESS;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF requested MFW version prior to ACQUIRE\n");
                return ECORE_INVAL;
        }

        global_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
                                                       public_base,
                                                       PUBLIC_GLOBAL));
        *p_mfw_ver =
                ecore_rd(p_hwfn, p_ptt,
                         SECTION_ADDR(global_offsize,
                                      0) + OFFSETOF(struct public_global,
                                                    mfw_ver));

        if (p_running_bundle_id != OSAL_NULL) {
                *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
                                                SECTION_ADDR(global_offsize,
                                                             0) +
                                                OFFSETOF(struct public_global,
                                                         running_bundle_id));
        }

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
                                              u32 *p_media_type)
{
        struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
        struct ecore_ptt *p_ptt;

        /* TODO - Add support for VFs */
        if (IS_VF(p_dev))
                return ECORE_INVAL;

        if (!ecore_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
                return ECORE_BUSY;
        }

        *p_media_type = MEDIA_UNSPECIFIED;

        p_ptt = ecore_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return ECORE_BUSY;

        *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
                                 OFFSETOF(struct public_port, media_type));

        ecore_ptt_release(p_hwfn, p_ptt);

        return ECORE_SUCCESS;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
                                 enum ecore_pci_personality *p_proto)
{
        *p_proto = ECORE_PCI_ETH;

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "According to Legacy capabilities, L2 personality is %08x\n",
                   (u32)*p_proto);
}

static enum _ecore_status_t
ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              enum ecore_pci_personality *p_proto)
{
        u32 resp = 0, param = 0;
        enum _ecore_status_t rc;

        /* Query the MFW for the per-PF RDMA protocol; falling back to the
         * legacy path on failure (reconstructed body - the original was
         * elided here).
         */
        rc = ecore_mcp_cmd(p_hwfn, p_ptt,
                           DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp,
                           &param);
        if (rc != ECORE_SUCCESS)
                return rc;
        if (resp != FW_MSG_CODE_OK) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                           "MFW lacks support for command, assuming legacy\n");
                return ECORE_INVAL;
        }

        switch (param) {
        case FW_MB_PARAM_GET_PF_RDMA_NONE:
                *p_proto = ECORE_PCI_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, true,
                          "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
                          param);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
                   (u32)*p_proto, resp, param);
        return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
                          struct public_func *p_info,
                          struct ecore_ptt *p_ptt,
                          enum ecore_pci_personality *p_proto)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;

        switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
        case FUNC_MF_CFG_PROTOCOL_ETHERNET:
                if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
                    ECORE_SUCCESS)
                        ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
                break;
        default:
                rc = ECORE_INVAL;
        }

        return rc;
}

enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
                                                    struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_function_info *info;
        struct public_func shmem_info;

        ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
        info = &p_hwfn->mcp_info->func_info;

        info->pause_on_host = (shmem_info.config &
                               FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

        if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
                                      &info->protocol)) {
                DP_ERR(p_hwfn, "Unknown personality %08x\n",
                       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
                return ECORE_INVAL;
        }

        ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

        if (shmem_info.mac_upper || shmem_info.mac_lower) {
                info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
                info->mac[1] = (u8)(shmem_info.mac_upper);
                info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
                info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
                info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
                info->mac[5] = (u8)(shmem_info.mac_lower);
        } else {
                /* TODO - are there protocols for which there's no MAC? */
                DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
        }

        /* TODO - are these calculations true for BE machine? */
        info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
                         (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
        info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
                         (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

        info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

        info->mtu = (u16)shmem_info.mtu_size;

        DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
                   "Read configuration from shmem: pause_on_host %02x"
                   " protocol %02x BW [%02x - %02x]"
                   " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
                   " node %lx ovlan %04x\n",
                   info->pause_on_host, info->protocol,
                   info->bandwidth_min, info->bandwidth_max,
                   info->mac[0], info->mac[1], info->mac[2],
                   info->mac[3], info->mac[4], info->mac[5],
                   (unsigned long)info->wwn_port,
                   (unsigned long)info->wwn_node, info->ovlan);

        return ECORE_SUCCESS;
}

struct ecore_mcp_link_params
*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return OSAL_NULL;
        return &p_hwfn->mcp_info->link_input;
}

struct ecore_mcp_link_state
*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return OSAL_NULL;

        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
                p_hwfn->mcp_info->link_output.link_up = true;
        }

        return &p_hwfn->mcp_info->link_output;
}

struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return OSAL_NULL;
        return &p_hwfn->mcp_info->link_capabilities;
}

enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        enum _ecore_status_t rc;

        rc = ecore_mcp_cmd(p_hwfn, p_ptt,
                           DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

        /* Wait for the drain to complete before returning */
        OSAL_MSLEEP(1020);

        return rc;
}

const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return OSAL_NULL;
        return &p_hwfn->mcp_info->func_info;
}

enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
                                           struct ecore_ptt *p_ptt,
                                           struct ecore_mcp_nvm_params *params)
{
        enum _ecore_status_t rc;

        switch (params->type) {
        case ECORE_MCP_NVM_RD:
                rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
                                          params->nvm_common.offset,
                                          &params->nvm_common.resp,
                                          &params->nvm_common.param,
                                          params->nvm_rd.buf_size,
                                          params->nvm_rd.buf);
                break;
        case ECORE_MCP_CMD:
                rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
                                   params->nvm_common.offset,
                                   &params->nvm_common.resp,
                                   &params->nvm_common.param);
                break;
        case ECORE_MCP_NVM_WR:
                rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
                                          params->nvm_common.offset,
                                          &params->nvm_common.resp,
                                          &params->nvm_common.param,
                                          params->nvm_wr.buf_size,
                                          params->nvm_wr.buf);
                break;
        default:
                rc = ECORE_NOTIMPL;
                break;
        }

        return rc;
}

int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_ptt, u32 personalities)
{
        enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
        struct public_func shmem_info;
        int i, count = 0, num_pfs;

        num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);

        for (i = 0; i < num_pfs; i++) {
                ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
                                         MCP_PF_ID_BY_REL(p_hwfn, i));
                if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
                        continue;

                if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
                                              &protocol) != ECORE_SUCCESS)
                        continue;

                if ((1 << ((u32)protocol)) & personalities)
                        count++;
        }

        return count;
}

enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt,
                                              u32 *p_flash_size)
{
        u32 flash_size;

        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
                return ECORE_INVAL;
        }

        if (IS_VF(p_hwfn->p_dev))
                return ECORE_INVAL;

        flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
        flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
                     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
        flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

        *p_flash_size = flash_size;

        return ECORE_SUCCESS;
}
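
/* Worked example for the conversion above, using a hypothetical register
 * reading: if the FLASH_SIZE field of MCP_REG_NVM_CFG4 is 3, the flash holds
 * 2^3 = 8 Mbit, and 1 << (3 + MCP_BYTES_PER_MBIT_SHIFT) = 1 << 20 = 1 MB.
 */
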
enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
                                                  struct ecore_ptt *p_ptt)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;

        if (p_dev->recov_in_prog) {
                DP_NOTICE(p_hwfn, false,
                          "Avoid triggering a recovery since such a process"
                          " is already in progress\n");
                return ECORE_AGAIN;
        }

        DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
        ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt,
                                              u8 vf_id, u8 num)
{
        u32 resp = 0, param = 0, rc_param = 0;
        enum _ecore_status_t rc;

        /* Only Leader can configure MSIX, and need to take CMT into account */
        if (!IS_LEAD_HWFN(p_hwfn))
                return ECORE_SUCCESS;
        num *= p_hwfn->p_dev->num_hwfns;

        param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
                 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
        param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
                 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

        rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
                           &resp, &rc_param);

        if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
                DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
                          vf_id);
                rc = ECORE_INVAL;
        } else {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
                           num, vf_id);
        }

        return rc;
}

2037 enum _ecore_status_t
2038 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2039 struct ecore_mcp_drv_version *p_ver)
2041 struct ecore_mcp_mb_params mb_params;
2042 struct drv_version_stc drv_version;
2046 enum _ecore_status_t rc;
2049 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2050 return ECORE_SUCCESS;
2053 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2054 drv_version.version = p_ver->version;
2055 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2056 for (i = 0; i < num_words; i++) {
2057 /* The driver name is expected to be in a big-endian format */
2058 p_name = &p_ver->name[i * sizeof(u32)];
2059 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2060 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2063 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2064 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2065 mb_params.p_data_src = &drv_version;
2066 mb_params.data_src_size = sizeof(drv_version);
2067 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2068 if (rc != ECORE_SUCCESS)
2069 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2071 return rc;
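/* Example (illustrative): why the name is copied one u32 at a time. On a
 * little-endian host, loading the bytes 'q' 'e' 'd' 'e' as a u32 gives
 * 0x65646571; OSAL_CPU_TO_BE32() swaps it to 0x71656465, so the MFW sees
 * the characters of the driver name in their natural order.
 */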
2074 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2075 struct ecore_ptt *p_ptt)
2077 enum _ecore_status_t rc;
2078 u32 resp = 0, param = 0;
2080 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2081 &param);
2082 if (rc != ECORE_SUCCESS)
2083 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2085 return rc;
2088 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2089 struct ecore_ptt *p_ptt)
2091 u32 value, cpu_mode;
2093 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2095 value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2096 value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2097 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2098 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2100 return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? ECORE_INVAL : ECORE_SUCCESS;
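/* Usage sketch (hypothetical caller, not part of this file): halt and
 * resume are meant to be used as a pair around work that needs the
 * management CPU quiesced:
 *
 *	if (ecore_mcp_halt(p_hwfn, p_ptt) == ECORE_SUCCESS) {
 *		// ... work while the MCP is soft-halted ...
 *		rc = ecore_mcp_resume(p_hwfn, p_ptt);
 *	}
 */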
2103 enum _ecore_status_t
2104 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2105 struct ecore_ptt *p_ptt,
2106 enum ecore_ov_client client)
2108 enum _ecore_status_t rc;
2109 u32 resp = 0, param = 0;
2110 u32 drv_mb_param;
2112 switch (client) {
2113 case ECORE_OV_CLIENT_DRV:
2114 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2115 break;
2116 case ECORE_OV_CLIENT_USER:
2117 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2118 break;
2119 case ECORE_OV_CLIENT_VENDOR_SPEC:
2120 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2121 break;
2122 default:
2123 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2124 return ECORE_INVAL;
2125 }
2127 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2128 drv_mb_param, &resp, &param);
2129 if (rc != ECORE_SUCCESS)
2130 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2132 return rc;
2135 enum _ecore_status_t
2136 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2137 struct ecore_ptt *p_ptt,
2138 enum ecore_ov_driver_state drv_state)
2140 enum _ecore_status_t rc;
2141 u32 resp = 0, param = 0;
2142 u32 drv_mb_param;
2144 switch (drv_state) {
2145 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2146 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2147 break;
2148 case ECORE_OV_DRIVER_STATE_DISABLED:
2149 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2150 break;
2151 case ECORE_OV_DRIVER_STATE_ACTIVE:
2152 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2153 break;
2154 default:
2155 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2156 return ECORE_INVAL;
2157 }
2159 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2160 drv_mb_param, &resp, &param);
2161 if (rc != ECORE_SUCCESS)
2162 DP_ERR(p_hwfn, "Failed to send driver state\n");
2164 return rc;
2167 enum _ecore_status_t
2168 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2169 struct ecore_fc_npiv_tbl *p_table)
2174 enum _ecore_status_t
2175 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
2176 struct ecore_ptt *p_ptt, u16 mtu)
2181 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2182 struct ecore_ptt *p_ptt,
2183 enum ecore_led_mode mode)
2185 u32 resp = 0, param = 0, drv_mb_param;
2186 enum _ecore_status_t rc;
2188 switch (mode) {
2189 case ECORE_LED_MODE_ON:
2190 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2191 break;
2192 case ECORE_LED_MODE_OFF:
2193 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2194 break;
2195 case ECORE_LED_MODE_RESTORE:
2196 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2197 break;
2198 default:
2199 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2200 return ECORE_INVAL;
2201 }
2203 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2204 drv_mb_param, &resp, &param);
2205 if (rc != ECORE_SUCCESS)
2206 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2208 return rc;
2211 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2212 struct ecore_ptt *p_ptt,
2213 u32 mask_parities)
2215 enum _ecore_status_t rc;
2216 u32 resp = 0, param = 0;
2218 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2219 mask_parities, &resp, &param);
2221 if (rc != ECORE_SUCCESS) {
2223 "MCP response failure for mask parities, aborting\n");
2224 } else if (resp != FW_MSG_CODE_OK) {
2226 "MCP did not ack mask parity request. Old MFW?\n");
2233 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2234 u8 *p_buf, u32 len)
2236 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2237 u32 bytes_left, offset, bytes_to_copy, buf_size;
2238 struct ecore_mcp_nvm_params params;
2239 struct ecore_ptt *p_ptt;
2240 enum _ecore_status_t rc = ECORE_SUCCESS;
2242 p_ptt = ecore_ptt_acquire(p_hwfn);
2246 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2247 bytes_left = len;
2248 offset = 0;
2249 params.type = ECORE_MCP_NVM_RD;
2250 params.nvm_rd.buf_size = &buf_size;
2251 params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
2252 while (bytes_left > 0) {
2253 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2254 MCP_DRV_NVM_BUF_LEN);
2255 params.nvm_common.offset = (addr + offset) |
2256 (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
2257 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2258 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2259 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
2260 FW_MSG_CODE_NVM_OK)) {
2261 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2262 break;
2263 }
2265 /* This can be a lengthy process, and it's possible the scheduler
2266 * isn't preemptible. Sleep a bit to prevent CPU hogging.
2267 */
2268 if (bytes_left % 0x1000 <
2269 (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
2270 OSAL_MSLEEP(1);
2272 offset += *params.nvm_rd.buf_size;
2273 bytes_left -= *params.nvm_rd.buf_size;
2276 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2277 ecore_ptt_release(p_hwfn, p_ptt);
2279 return rc;
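/* Usage sketch (hypothetical caller): reading 8 KB starting at NVM offset
 * 0; the chunking into MCP_DRV_NVM_BUF_LEN-sized mailbox transactions is
 * handled inside the function above, and the last MFW response is cached
 * in p_dev->mcp_nvm_resp:
 *
 *	u8 buf[8192];
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_nvm_read(p_dev, 0, buf, sizeof(buf));
 */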
2282 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2283 u32 addr, u8 *p_buf, u32 len)
2285 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2286 struct ecore_mcp_nvm_params params;
2287 struct ecore_ptt *p_ptt;
2288 enum _ecore_status_t rc;
2290 p_ptt = ecore_ptt_acquire(p_hwfn);
2294 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2295 params.type = ECORE_MCP_NVM_RD;
2296 params.nvm_rd.buf_size = &len;
2297 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2298 DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
2299 params.nvm_common.offset = addr;
2300 params.nvm_rd.buf = (u32 *)p_buf;
2301 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2302 if (rc != ECORE_SUCCESS)
2303 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2305 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2306 ecore_ptt_release(p_hwfn, p_ptt);
2308 return rc;
2311 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2313 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2314 struct ecore_mcp_nvm_params params;
2315 struct ecore_ptt *p_ptt;
2317 p_ptt = ecore_ptt_acquire(p_hwfn);
2321 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2322 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2323 ecore_ptt_release(p_hwfn, p_ptt);
2325 return ECORE_SUCCESS;
2328 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2330 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2331 struct ecore_mcp_nvm_params params;
2332 struct ecore_ptt *p_ptt;
2333 enum _ecore_status_t rc;
2335 p_ptt = ecore_ptt_acquire(p_hwfn);
2338 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2339 params.type = ECORE_MCP_CMD;
2340 params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
2341 params.nvm_common.offset = addr;
2342 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2343 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2344 ecore_ptt_release(p_hwfn, p_ptt);
2346 return rc;
2349 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2350 u32 addr)
2352 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2353 struct ecore_mcp_nvm_params params;
2354 struct ecore_ptt *p_ptt;
2355 enum _ecore_status_t rc;
2357 p_ptt = ecore_ptt_acquire(p_hwfn);
2360 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2361 params.type = ECORE_MCP_CMD;
2362 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2363 params.nvm_common.offset = addr;
2364 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2365 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2366 ecore_ptt_release(p_hwfn, p_ptt);
2368 return rc;
2371 /* rc is initialized to ECORE_INVAL by default, since the while loop
2372 * below may never be entered if len is 0.
2373 */
2374 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2375 u32 addr, u8 *p_buf, u32 len)
2377 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2378 enum _ecore_status_t rc = ECORE_INVAL;
2379 struct ecore_mcp_nvm_params params;
2380 struct ecore_ptt *p_ptt;
2381 u32 buf_idx, buf_size;
2383 p_ptt = ecore_ptt_acquire(p_hwfn);
2387 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2388 params.type = ECORE_MCP_NVM_WR;
2389 if (cmd == ECORE_PUT_FILE_DATA)
2390 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2392 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2393 buf_idx = 0;
2394 while (buf_idx < len) {
2395 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2396 MCP_DRV_NVM_BUF_LEN);
2397 params.nvm_common.offset = ((buf_size <<
2398 DRV_MB_PARAM_NVM_LEN_SHIFT)
2399 | addr) + buf_idx;
2400 params.nvm_wr.buf_size = buf_size;
2401 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2402 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2403 if (rc != ECORE_SUCCESS ||
2404 ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
2405 (params.nvm_common.resp !=
2406 FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
2407 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2409 /* This can be a lengthy process, and it's possible the scheduler
2410 * isn't preemptible. Sleep a bit to prevent CPU hogging.
2411 */
2412 if (buf_idx % 0x1000 >
2413 (buf_idx + buf_size) % 0x1000)
2414 OSAL_MSLEEP(1);
2416 buf_idx += buf_size;
2419 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2420 ecore_ptt_release(p_hwfn, p_ptt);
2422 return rc;
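/* Note on the sleep heuristic above (illustrative arithmetic): with
 * buf_size capped at MCP_DRV_NVM_BUF_LEN, the condition
 * buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000 holds exactly when a
 * chunk crosses a 4 KB boundary, e.g. buf_idx = 0x0ff0 and buf_size = 0x20
 * give 0x0ff0 > 0x0010, so the driver yields the CPU roughly once per
 * 4 KB written.
 */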
2425 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2426 u32 addr, u8 *p_buf, u32 len)
2428 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2429 struct ecore_mcp_nvm_params params;
2430 struct ecore_ptt *p_ptt;
2431 enum _ecore_status_t rc;
2433 p_ptt = ecore_ptt_acquire(p_hwfn);
2437 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2438 params.type = ECORE_MCP_NVM_WR;
2439 params.nvm_wr.buf_size = len;
2440 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
2441 DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
2442 params.nvm_common.offset = addr;
2443 params.nvm_wr.buf = (u32 *)p_buf;
2444 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2445 if (rc != ECORE_SUCCESS)
2446 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2447 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2448 ecore_ptt_release(p_hwfn, p_ptt);
2450 return rc;
2453 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2454 u32 addr)
2456 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2457 struct ecore_mcp_nvm_params params;
2458 struct ecore_ptt *p_ptt;
2459 enum _ecore_status_t rc;
2461 p_ptt = ecore_ptt_acquire(p_hwfn);
2465 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2466 params.type = ECORE_MCP_CMD;
2467 params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
2468 params.nvm_common.offset = addr;
2469 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2470 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2471 ecore_ptt_release(p_hwfn, p_ptt);
2473 return rc;
2476 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2477 struct ecore_ptt *p_ptt,
2478 u32 port, u32 addr, u32 offset,
2479 u32 len, u8 *p_buf)
2481 struct ecore_mcp_nvm_params params;
2482 enum _ecore_status_t rc;
2483 u32 bytes_left, bytes_to_copy, buf_size;
2485 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2486 params.nvm_common.offset =
2487 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2488 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2489 addr = offset;
2490 offset = 0;
2491 bytes_left = len;
2492 params.type = ECORE_MCP_NVM_RD;
2493 params.nvm_rd.buf_size = &buf_size;
2494 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
2495 while (bytes_left > 0) {
2496 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2497 MAX_I2C_TRANSACTION_SIZE);
2498 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2499 params.nvm_common.offset &=
2500 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2501 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2502 params.nvm_common.offset |=
2503 ((addr + offset) <<
2504 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2505 params.nvm_common.offset |=
2506 (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2507 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2508 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2509 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2510 return ECORE_NODEV;
2511 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2512 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2513 return ECORE_UNKNOWN_ERROR;
2515 offset += *params.nvm_rd.buf_size;
2516 bytes_left -= *params.nvm_rd.buf_size;
2519 return ECORE_SUCCESS;
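/* Usage sketch (hypothetical caller): dumping the first 128 bytes of an
 * SFP EEPROM at I2C address 0xa0 on port 0; each mailbox transaction above
 * moves at most MAX_I2C_TRANSACTION_SIZE bytes:
 *
 *	u8 eeprom[128];
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 0, 0xa0, 0,
 *				    sizeof(eeprom), eeprom);
 */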
2522 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2523 struct ecore_ptt *p_ptt,
2524 u32 port, u32 addr, u32 offset,
2525 u32 len, u8 *p_buf)
2527 struct ecore_mcp_nvm_params params;
2528 enum _ecore_status_t rc;
2529 u32 buf_idx, buf_size;
2531 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2532 params.nvm_common.offset =
2533 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2534 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2535 params.type = ECORE_MCP_NVM_WR;
2536 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
2537 buf_idx = 0;
2538 while (buf_idx < len) {
2539 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2540 MAX_I2C_TRANSACTION_SIZE);
2541 params.nvm_common.offset &=
2542 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2543 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2544 params.nvm_common.offset |=
2545 ((offset + buf_idx) <<
2546 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2547 params.nvm_common.offset |=
2548 (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2549 params.nvm_wr.buf_size = buf_size;
2550 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2551 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2552 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2553 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2554 return ECORE_NODEV;
2555 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2556 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2557 return ECORE_UNKNOWN_ERROR;
2559 buf_idx += buf_size;
2562 return ECORE_SUCCESS;
2565 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2566 struct ecore_ptt *p_ptt,
2567 u16 gpio, u32 *gpio_val)
2569 enum _ecore_status_t rc = ECORE_SUCCESS;
2570 u32 drv_mb_param = 0, rsp;
2572 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
2574 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
2575 drv_mb_param, &rsp, gpio_val);
2577 if (rc != ECORE_SUCCESS)
2578 return rc;
2580 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2581 return ECORE_UNKNOWN_ERROR;
2583 return ECORE_SUCCESS;
2586 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2587 struct ecore_ptt *p_ptt,
2588 u16 gpio, u16 gpio_val)
2590 enum _ecore_status_t rc = ECORE_SUCCESS;
2591 u32 drv_mb_param = 0, param, rsp;
2593 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
2594 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
2596 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2597 drv_mb_param, &rsp, &param);
2599 if (rc != ECORE_SUCCESS)
2600 return rc;
2602 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2603 return ECORE_UNKNOWN_ERROR;
2605 return ECORE_SUCCESS;
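/* Usage sketch (hypothetical caller and GPIO number): reading a GPIO and
 * writing back its inverted value through the MFW:
 *
 *	u32 val;
 *
 *	if (ecore_mcp_gpio_read(p_hwfn, p_ptt, 10, &val) == ECORE_SUCCESS)
 *		(void)ecore_mcp_gpio_write(p_hwfn, p_ptt, 10, (u16)!val);
 */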
2608 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2609 struct ecore_ptt *p_ptt,
2610 u16 gpio, u32 *gpio_direction,
2611 u32 *gpio_ctrl)
2613 u32 drv_mb_param = 0, rsp, val = 0;
2614 enum _ecore_status_t rc = ECORE_SUCCESS;
2616 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
2618 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2619 drv_mb_param, &rsp, &val);
2620 if (rc != ECORE_SUCCESS)
2621 return rc;
2623 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2624 DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
2625 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2626 DRV_MB_PARAM_GPIO_CTRL_SHIFT;
2628 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2629 return ECORE_UNKNOWN_ERROR;
2631 return ECORE_SUCCESS;
2634 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2635 struct ecore_ptt *p_ptt)
2637 u32 drv_mb_param = 0, rsp, param;
2638 enum _ecore_status_t rc = ECORE_SUCCESS;
2640 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2641 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2643 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2644 drv_mb_param, &rsp, &param);
2646 if (rc != ECORE_SUCCESS)
2647 return rc;
2649 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2650 (param != DRV_MB_PARAM_BIST_RC_PASSED))
2651 rc = ECORE_UNKNOWN_ERROR;
2653 return rc;
2656 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2657 struct ecore_ptt *p_ptt)
2659 u32 drv_mb_param, rsp, param;
2660 enum _ecore_status_t rc = ECORE_SUCCESS;
2662 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
2663 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2665 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2666 drv_mb_param, &rsp, &param);
2668 if (rc != ECORE_SUCCESS)
2669 return rc;
2671 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2672 (param != DRV_MB_PARAM_BIST_RC_PASSED))
2673 rc = ECORE_UNKNOWN_ERROR;
2675 return rc;
2678 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
2679 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
2681 u32 drv_mb_param = 0, rsp;
2682 enum _ecore_status_t rc = ECORE_SUCCESS;
2684 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
2685 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2687 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2688 drv_mb_param, &rsp, num_images);
2690 if (rc != ECORE_SUCCESS)
2691 return rc;
2693 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
2694 rc = ECORE_UNKNOWN_ERROR;
2696 return rc;
2699 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
2700 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2701 struct bist_nvm_image_att *p_image_att, u32 image_index)
2703 struct ecore_mcp_nvm_params params;
2704 enum _ecore_status_t rc;
2705 u32 buf_size;
2707 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2708 params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
2709 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2710 params.nvm_common.offset |= (image_index <<
2711 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
2713 params.type = ECORE_MCP_NVM_RD;
2714 params.nvm_rd.buf_size = &buf_size;
2715 params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
2716 params.nvm_rd.buf = (u32 *)p_image_att;
2718 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2719 if (rc != ECORE_SUCCESS)
2720 return rc;
2722 if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2723 (p_image_att->return_code != 1))
2724 rc = ECORE_UNKNOWN_ERROR;
2726 return rc;
2729 enum _ecore_status_t
2730 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
2731 struct ecore_ptt *p_ptt,
2732 struct ecore_temperature_info *p_temp_info)
2734 struct ecore_temperature_sensor *p_temp_sensor;
2735 struct temperature_status_stc mfw_temp_info;
2736 struct ecore_mcp_mb_params mb_params;
2737 u32 val;
2738 enum _ecore_status_t rc;
2739 u8 i;
2741 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2742 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
2743 mb_params.p_data_dst = &mfw_temp_info;
2744 mb_params.data_dst_size = sizeof(mfw_temp_info);
2745 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2746 if (rc != ECORE_SUCCESS)
2747 return rc;
2749 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
2750 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
2751 ECORE_MAX_NUM_OF_SENSORS);
2752 for (i = 0; i < p_temp_info->num_sensors; i++) {
2753 val = mfw_temp_info.sensor[i];
2754 p_temp_sensor = &p_temp_info->sensors[i];
2755 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
2756 SENSOR_LOCATION_SHIFT;
2757 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
2758 THRESHOLD_HIGH_SHIFT;
2759 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
2760 CRITICAL_TEMPERATURE_SHIFT;
2761 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
2762 CURRENT_TEMP_SHIFT;
2763 }
2765 return ECORE_SUCCESS;
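/* Note (field layout assumed from the MASK/SHIFT pairs above): each sensor
 * entry is a packed u32 holding the sensor location, the high and critical
 * temperature thresholds, and the current temperature. A caller would
 * typically compare p_temp_sensor->current_temp against threshold_high and
 * critical to decide whether to raise a thermal alarm.
 */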
2768 enum _ecore_status_t ecore_mcp_get_mba_versions(
2769 struct ecore_hwfn *p_hwfn,
2770 struct ecore_ptt *p_ptt,
2771 struct ecore_mba_vers *p_mba_vers)
2773 struct ecore_mcp_nvm_params params;
2774 enum _ecore_status_t rc;
2775 u32 buf_size;
2777 OSAL_MEM_ZERO(&params, sizeof(params));
2778 params.type = ECORE_MCP_NVM_RD;
2779 params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
2780 params.nvm_common.offset = 0;
2781 params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
2782 params.nvm_rd.buf_size = &buf_size;
2783 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2785 if (rc != ECORE_SUCCESS)
2786 return rc;
2788 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2789 FW_MSG_CODE_NVM_OK)
2790 rc = ECORE_UNKNOWN_ERROR;
2792 if (buf_size != MCP_DRV_NVM_BUF_LEN)
2793 rc = ECORE_UNKNOWN_ERROR;
2795 return rc;
2798 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
2799 struct ecore_ptt *p_ptt,
2800 u64 *num_events)
2802 u32 rsp;
2804 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
2805 0, &rsp, (u32 *)num_events);
2808 static enum resource_id_enum
2809 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
2811 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
2813 switch (res_id) {
2814 case ECORE_SB:
2815 mfw_res_id = RESOURCE_NUM_SB_E;
2816 break;
2817 case ECORE_L2_QUEUE:
2818 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
2819 break;
2820 case ECORE_VPORT:
2821 mfw_res_id = RESOURCE_NUM_VPORT_E;
2822 break;
2823 case ECORE_RSS_ENG:
2824 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
2825 break;
2826 case ECORE_PQ:
2827 mfw_res_id = RESOURCE_NUM_PQ_E;
2828 break;
2829 case ECORE_RL:
2830 mfw_res_id = RESOURCE_NUM_RL_E;
2831 break;
2832 case ECORE_MAC:
2833 case ECORE_VLAN:
2834 /* Each VFC resource can accommodate both a MAC and a VLAN */
2835 mfw_res_id = RESOURCE_VFC_FILTER_E;
2836 break;
2837 case ECORE_ILT:
2838 mfw_res_id = RESOURCE_ILT_E;
2839 break;
2840 case ECORE_LL2_QUEUE:
2841 mfw_res_id = RESOURCE_LL2_QUEUE_E;
2842 break;
2843 case ECORE_RDMA_CNQ_RAM:
2844 case ECORE_CMDQS_CQS:
2845 /* CNQ/CMDQS are the same resource */
2846 mfw_res_id = RESOURCE_CQS_E;
2847 break;
2848 case ECORE_RDMA_STATS_QUEUE:
2849 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
2850 break;
2851 case ECORE_BDQ:
2852 mfw_res_id = RESOURCE_BDQ_E;
2853 break;
2854 default:
2855 break;
2856 }
2858 return mfw_res_id;
2861 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
2862 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
2863 #define ECORE_RESC_ALLOC_VERSION \
2864 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
2865 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
2866 (ECORE_RESC_ALLOC_VERSION_MINOR << \
2867 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
2869 struct ecore_resc_alloc_in_params {
2870 u32 cmd;
2871 enum ecore_resources res_id;
2872 u32 resc_max_val;
2873 };
2875 struct ecore_resc_alloc_out_params {
2876 u32 mcp_resp;
2877 u32 mcp_param;
2878 u32 resc_num;
2879 u32 resc_start;
2880 u32 vf_resc_num;
2881 u32 vf_resc_start;
2882 u32 flags;
2883 };
2885 static enum _ecore_status_t
2886 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
2887 struct ecore_ptt *p_ptt,
2888 struct ecore_resc_alloc_in_params *p_in_params,
2889 struct ecore_resc_alloc_out_params *p_out_params)
2891 struct ecore_mcp_mb_params mb_params;
2892 struct resource_info mfw_resc_info;
2893 enum _ecore_status_t rc;
2895 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
2897 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
2898 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
2900 "Failed to match resource %d [%s] with the MFW resources\n",
2901 p_in_params->res_id,
2902 ecore_hw_get_resc_name(p_in_params->res_id));
2903 return ECORE_INVAL;
2904 }
2906 switch (p_in_params->cmd) {
2907 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
2908 mfw_resc_info.size = p_in_params->resc_max_val;
2909 break;
2910 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
2911 break;
2912 default:
2913 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
2914 p_in_params->cmd);
2915 return ECORE_INVAL;
2916 }
2918 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2919 mb_params.cmd = p_in_params->cmd;
2920 mb_params.param = ECORE_RESC_ALLOC_VERSION;
2921 mb_params.p_data_src = &mfw_resc_info;
2922 mb_params.data_src_size = sizeof(mfw_resc_info);
2923 mb_params.p_data_dst = mb_params.p_data_src;
2924 mb_params.data_dst_size = mb_params.data_src_size;
2926 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2927 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
2928 p_in_params->cmd, p_in_params->res_id,
2929 ecore_hw_get_resc_name(p_in_params->res_id),
2930 ECORE_MFW_GET_FIELD(mb_params.param,
2931 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
2932 ECORE_MFW_GET_FIELD(mb_params.param,
2933 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
2934 p_in_params->resc_max_val);
2936 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2937 if (rc != ECORE_SUCCESS)
2938 return rc;
2940 p_out_params->mcp_resp = mb_params.mcp_resp;
2941 p_out_params->mcp_param = mb_params.mcp_param;
2942 p_out_params->resc_num = mfw_resc_info.size;
2943 p_out_params->resc_start = mfw_resc_info.offset;
2944 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
2945 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
2946 p_out_params->flags = mfw_resc_info.flags;
2948 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2949 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
2950 ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
2951 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
2952 ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
2953 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
2954 p_out_params->resc_num, p_out_params->resc_start,
2955 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
2956 p_out_params->flags);
2958 return ECORE_SUCCESS;
2961 enum _ecore_status_t
2962 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2963 enum ecore_resources res_id, u32 resc_max_val,
2964 u32 *p_mcp_resp)
2966 struct ecore_resc_alloc_out_params out_params;
2967 struct ecore_resc_alloc_in_params in_params;
2968 enum _ecore_status_t rc;
2970 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
2971 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
2972 in_params.res_id = res_id;
2973 in_params.resc_max_val = resc_max_val;
2974 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
2975 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
2976 &out_params);
2977 if (rc != ECORE_SUCCESS)
2978 return rc;
2980 *p_mcp_resp = out_params.mcp_resp;
2982 return ECORE_SUCCESS;
2985 enum _ecore_status_t
2986 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2987 enum ecore_resources res_id, u32 *p_mcp_resp,
2988 u32 *p_resc_num, u32 *p_resc_start)
2990 struct ecore_resc_alloc_out_params out_params;
2991 struct ecore_resc_alloc_in_params in_params;
2992 enum _ecore_status_t rc;
2994 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
2995 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
2996 in_params.res_id = res_id;
2997 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
2998 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
2999 &out_params);
3000 if (rc != ECORE_SUCCESS)
3001 return rc;
3003 *p_mcp_resp = out_params.mcp_resp;
3005 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3006 *p_resc_num = out_params.resc_num;
3007 *p_resc_start = out_params.resc_start;
3010 return ECORE_SUCCESS;
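/* Usage sketch (hypothetical caller): querying the VPORT range the MFW
 * assigned to this PF:
 *
 *	u32 resp, num, start;
 *
 *	if (ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_VPORT, &resp,
 *				    &num, &start) == ECORE_SUCCESS &&
 *	    resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		; // [start, start + num) is this PF's VPORT range
 */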
3013 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3014 struct ecore_ptt *p_ptt)
3016 u32 mcp_resp, mcp_param;
3018 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3019 &mcp_resp, &mcp_param);
3022 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3023 struct ecore_ptt *p_ptt,
3024 u32 param, u32 *p_mcp_resp,
3025 u32 *p_mcp_param)
3027 enum _ecore_status_t rc;
3029 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3030 p_mcp_resp, p_mcp_param);
3031 if (rc != ECORE_SUCCESS)
3032 return rc;
3034 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3036 "The resource command is unsupported by the MFW\n");
3037 return ECORE_NOTIMPL;
3040 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3041 u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3043 DP_NOTICE(p_hwfn, false,
3044 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3052 enum _ecore_status_t
3053 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3054 struct ecore_resc_lock_params *p_params)
3056 u32 param = 0, mcp_resp, mcp_param;
3057 u8 opcode;
3058 enum _ecore_status_t rc;
3060 switch (p_params->timeout) {
3061 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3062 opcode = RESOURCE_OPCODE_REQ;
3063 p_params->timeout = 0;
3064 break;
3065 case ECORE_MCP_RESC_LOCK_TO_NONE:
3066 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3067 p_params->timeout = 0;
3068 break;
3069 default:
3070 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3071 break;
3072 }
3074 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3075 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3076 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3078 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3079 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3080 param, p_params->timeout, opcode, p_params->resource);
3082 /* Attempt to acquire the resource */
3083 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3084 &mcp_param);
3085 if (rc != ECORE_SUCCESS)
3086 return rc;
3088 /* Analyze the response */
3089 p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
3090 RESOURCE_CMD_RSP_OWNER);
3091 opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3093 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3094 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3095 mcp_param, opcode, p_params->owner);
3097 switch (opcode) {
3098 case RESOURCE_OPCODE_GNT:
3099 p_params->b_granted = true;
3100 break;
3101 case RESOURCE_OPCODE_BUSY:
3102 p_params->b_granted = false;
3103 break;
3104 default:
3105 DP_NOTICE(p_hwfn, false,
3106 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3107 mcp_param, opcode);
3108 return ECORE_INVAL;
3109 }
3111 return ECORE_SUCCESS;
3114 enum _ecore_status_t
3115 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3116 struct ecore_resc_lock_params *p_params)
3118 u32 retry_cnt = 0;
3119 enum _ecore_status_t rc;
3121 do {
3122 /* No need for an interval before the first iteration */
3123 if (retry_cnt) {
3124 if (p_params->sleep_b4_retry) {
3125 u16 retry_interval_in_ms =
3126 DIV_ROUND_UP(p_params->retry_interval,
3127 1000);
3129 OSAL_MSLEEP(retry_interval_in_ms);
3130 } else {
3131 OSAL_UDELAY(p_params->retry_interval);
3132 }
3133 }
3135 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3136 if (rc != ECORE_SUCCESS)
3137 return rc;
3139 if (p_params->b_granted)
3140 break;
3141 } while (retry_cnt++ < p_params->retry_num);
3143 return ECORE_SUCCESS;
3146 enum _ecore_status_t
3147 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3148 struct ecore_resc_unlock_params *p_params)
3150 u32 param = 0, mcp_resp, mcp_param;
3151 u8 opcode;
3152 enum _ecore_status_t rc;
3154 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3155 : RESOURCE_OPCODE_RELEASE;
3156 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3157 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3159 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3160 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3161 param, opcode, p_params->resource);
3163 /* Attempt to release the resource */
3164 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3165 &mcp_param);
3166 if (rc != ECORE_SUCCESS)
3167 return rc;
3169 /* Analyze the response */
3170 opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3172 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3173 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3177 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3179 "Resource unlock request for an already released resource [%d]\n",
3180 p_params->resource);
3182 case RESOURCE_OPCODE_RELEASED:
3183 p_params->b_released = true;
3184 break;
3185 case RESOURCE_OPCODE_WRONG_OWNER:
3186 p_params->b_released = false;
3187 break;
3188 default:
3189 DP_NOTICE(p_hwfn, false,
3190 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3191 mcp_param, opcode);
3192 return ECORE_INVAL;
3193 }
3195 return ECORE_SUCCESS;
3196 }
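/* Usage sketch (hypothetical caller and resource id): the lock/unlock pair
 * above implements a cross-function mutex arbitrated by the MFW. A typical
 * guarded section looks like:
 *
 *	struct ecore_resc_lock_params lock;
 *	struct ecore_resc_unlock_params unlock;
 *
 *	OSAL_MEM_ZERO(&lock, sizeof(lock));
 *	OSAL_MEM_ZERO(&unlock, sizeof(unlock));
 *	// both must name the same MFW resource id (MY_RES is hypothetical)
 *	lock.resource = MY_RES;
 *	unlock.resource = MY_RES;
 *	lock.retry_num = 10;
 *	lock.retry_interval = 10000;	// usec between attempts
 *	lock.sleep_b4_retry = true;
 *
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock) == ECORE_SUCCESS &&
 *	    lock.b_granted) {
 *		// ... exclusive access to the shared resource ...
 *		(void)ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */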