2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_iov_api.h"
19 #include "ecore_gtt_reg_addr.h"
20 #include "ecore_iro.h"
21 #include "ecore_dcbx.h"
/* MFW response polling interval per iteration: 10 usec on real silicon,
 * a full second on emulation where the MCP runs much slower.
 */
23 #define CHIP_MCP_RESP_ITER_US 10
24 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
/* Retry budgets (iterations of the above delay) for a mailbox command
 * and for an MCP reset, respectively.
 */
26 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
27 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
/* Low-level read/write of a register inside one of the driver<->MFW
 * mailboxes. _ptr names the ecore_mcp_info member holding the mailbox
 * base address (e.g. drv_mb_addr); _offset is the register offset.
 */
29 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
30 ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
33 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
34 ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
/* Write/read a named field of the driver mailbox (struct public_drv_mb).
 * Fix: DRV_MB_WR previously expanded the bare identifier `p_hwfn` instead
 * of its `_p_hwfn` parameter, so it only compiled when the caller's local
 * variable happened to be named p_hwfn. Use the macro parameter, matching
 * DRV_MB_RD below. No caller-visible change for existing callers.
 */
36 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
37 DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
38 OFFSETOF(struct public_drv_mb, _field), _val)
40 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
41 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
42 OFFSETOF(struct public_drv_mb, _field))
/* Driver PDA compatibility version for LOAD_REQ: FW major in the low
 * byte, FW minor shifted into bits 8..15, then moved to the DRV_ID
 * PDA component field.
 */
44 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
45 DRV_ID_PDA_COMP_VER_SHIFT)
/* Shift used to convert between Mbit rates and byte counts. */
47 #define MCP_BYTES_PER_MBIT_SHIFT 17
/* Per-port load counters, used only on emulation (no MFW) by
 * ecore_mcp_mf_workaround() and the UNLOAD_REQ path in ecore_mcp_cmd()
 * to emulate the MFW's engine/port/function load-phase arbitration.
 */
51 static int loaded_port[MAX_NUM_PORTS] = { 0 };
/* Returns whether the MCP interface is usable; public_base == 0 is the
 * sentinel set when no MFW was found (see ecore_load_mcp_offsets()).
 */
54 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
56 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
/* Compute and cache the per-port public shmem section address
 * (mcp_info->port_addr) for this hwfn's MFW port.
 */
61 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
63 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
65 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
/* Resolve the section base for this port from the offsize word */
67 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
69 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
70 "port_addr = 0x%x, port_id 0x%02x\n",
71 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
/* Read the MFW->driver message dwords from shmem into mfw_mb_cur,
 * converting each from big-endian. The `+ sizeof(u32)` skips the
 * leading length word of the MFW mailbox.
 */
74 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
76 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
/* Nothing to read on TEDIBEAR emulation or when no MFW was detected */
81 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
85 if (!p_hwfn->mcp_info->public_base)
88 for (i = 0; i < length; i++) {
89 tmp = ecore_rd(p_hwfn, p_ptt,
90 p_hwfn->mcp_info->mfw_mb_addr +
91 (i << 2) + sizeof(u32));
92 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
94 OSAL_BE32_TO_CPU(tmp);
/* Release all MCP resources: message buffers, the mailbox spinlock and
 * the mcp_info structure itself. Safe to call when mcp_info is NULL.
 */
98 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
100 if (p_hwfn->mcp_info) {
101 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
102 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
103 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
105 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
/* Clear the pointer so later ecore_mcp_is_init() calls see "no MCP" */
106 p_hwfn->mcp_info = OSAL_NULL;
108 return ECORE_SUCCESS;
/* Discover the MFW shared-memory layout: public_base, the driver and
 * MFW mailbox addresses for this PF, the current mailbox/pulse sequence
 * numbers, and the MCP generation (mcp_hist) used to detect MCP resets.
 * On emulation, public_base is left 0 to mark "no MFW".
 */
111 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
112 struct ecore_ptt *p_ptt)
114 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
115 u32 drv_mb_offsize, mfw_mb_offsize;
116 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
119 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
120 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
121 p_info->public_base = 0;
/* Shared-memory base published by the MFW; 0 means no MFW present */
126 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
127 if (!p_info->public_base)
130 p_info->public_base |= GRCBASE_MCP;
132 /* Calculate the driver and MFW mailbox address */
133 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
134 SECTION_OFFSIZE_ADDR(p_info->public_base,
136 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
137 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
138 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
139 " mcp_pf_id = 0x%x\n",
140 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
142 /* Set the MFW MB address */
143 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
144 SECTION_OFFSIZE_ADDR(p_info->public_base,
146 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
/* First word of the MFW mailbox holds its length */
147 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
148 p_info->mfw_mb_addr);
150 /* Get the current driver mailbox sequence before sending
153 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
154 DRV_MSG_SEQ_NUMBER_MASK;
156 /* Get current FW pulse sequence */
157 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
/* Snapshot the MCP generation register to detect later MCP resets */
160 p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
161 MISCS_REG_GENERIC_POR_0);
163 return ECORE_SUCCESS;
/* Allocate and initialize the MCP interface for this hwfn: mcp_info,
 * the current/shadow MFW message buffers, and the mailbox spinlock.
 * Returns ECORE_SUCCESS even when no MFW is found (public_base == 0
 * then marks the MCP as uninitialized for ecore_mcp_is_init()).
 */
166 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
167 struct ecore_ptt *p_ptt)
169 struct ecore_mcp_info *p_info;
172 /* Allocate mcp_info structure */
173 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
174 sizeof(*p_hwfn->mcp_info));
175 if (!p_hwfn->mcp_info)
177 p_info = p_hwfn->mcp_info;
179 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
180 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
181 /* Do not free mcp_info here, since public_base indicate that
182 * the MCP is not initialized
184 return ECORE_SUCCESS;
187 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
188 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
/* NOTE(review): this checks mfw_mb_addr (set by ecore_load_mcp_offsets)
 * rather than the mfw_mb_cur allocation just made above. It looks like
 * the intended check is `!p_info->mfw_mb_cur` - confirm and fix.
 */
190 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
193 /* Initialize the MFW spinlock */
194 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
195 OSAL_SPIN_LOCK_INIT(&p_info->lock);
197 return ECORE_SUCCESS;
/* Error path: free any partial allocations */
200 DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
201 ecore_mcp_free(p_hwfn);
205 /* Locks the MFW mailbox of a PF to ensure a single access.
206 * The lock is achieved in most cases by holding a spinlock, causing other
207 * threads to wait till a previous access is done.
208 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
209 * access is achieved by setting a blocking flag, which will fail other
210 * competing contexts to send their mailboxes.
212 static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
215 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
217 /* The spinlock shouldn't be acquired when the mailbox command is
218 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
219 * pending [UN]LOAD_REQ command of another PF together with a spinlock
220 * (i.e. interrupts are disabled) - can lead to a deadlock.
221 * It is assumed that for a single PF, no other mailbox commands can be
222 * sent from another context while sending LOAD_REQ, and that any
223 * parallel commands to UNLOAD_REQ can be cancelled.
/* [UN]LOAD_DONE ends the blocking window opened by [UN]LOAD_REQ */
225 if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
226 p_hwfn->mcp_info->block_mb_sending = false;
228 if (p_hwfn->mcp_info->block_mb_sending) {
229 DP_NOTICE(p_hwfn, false,
230 "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
232 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
/* For [UN]LOAD_REQ, block others and release the spinlock instead of
 * holding it across the (potentially long) MFW engine lock.
 */
236 if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
237 p_hwfn->mcp_info->block_mb_sending = true;
238 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
241 return ECORE_SUCCESS;
/* Counterpart of ecore_mcp_mb_lock(): release the spinlock, except for
 * [UN]LOAD_REQ where the lock was already dropped and blocking is used.
 */
244 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
246 if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
247 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
/* Request an MCP reset via the mailbox and poll the generation register
 * (MISCS_REG_GENERIC_POR_0) until it changes, proving the reset took
 * effect, or until the retry budget expires.
 */
250 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
251 struct ecore_ptt *p_ptt)
253 u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
254 u32 delay = CHIP_MCP_RESP_ITER_US;
255 u32 org_mcp_reset_seq, cnt = 0;
256 enum _ecore_status_t rc = ECORE_SUCCESS;
259 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
260 delay = EMUL_MCP_RESP_ITER_US;
262 /* Ensure that only a single thread is accessing the mailbox at a
265 rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
266 if (rc != ECORE_SUCCESS)
269 /* Set drv command along with the updated sequence */
270 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
271 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
274 /* Wait for MFW response */
276 /* Give the FW up to 500 msec (50*1000 iterations of 10usec) */
277 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
278 MISCS_REG_GENERIC_POR_0)) &&
279 (cnt++ < ECORE_MCP_RESET_RETRIES));
/* Generation register changed => MCP went through reset */
281 if (org_mcp_reset_seq !=
282 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
283 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
284 "MCP was reset after %d usec\n", cnt * delay);
286 DP_ERR(p_hwfn, "Failed to reset MCP\n");
290 ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
295 /* Should be called while the dedicated spinlock is acquired */
/* Core mailbox transaction: write param + (cmd | seq) to the driver
 * mailbox, poll the FW mailbox header until the sequence echoes back,
 * then return the masked response code and param. On timeout, notifies
 * the error-handling layer with MFW_RESP_FAIL.
 */
296 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
297 struct ecore_ptt *p_ptt,
302 u32 delay = CHIP_MCP_RESP_ITER_US;
303 u32 seq, cnt = 1, actual_mb_seq;
304 enum _ecore_status_t rc = ECORE_SUCCESS;
307 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
308 delay = EMUL_MCP_RESP_ITER_US;
311 /* Get actual driver mailbox sequence */
312 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
313 DRV_MSG_SEQ_NUMBER_MASK;
315 /* Use MCP history register to check if MCP reset occurred between
318 if (p_hwfn->mcp_info->mcp_hist !=
319 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
320 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
321 ecore_load_mcp_offsets(p_hwfn, p_ptt);
322 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
324 seq = ++p_hwfn->mcp_info->drv_mb_seq;
327 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
329 /* Set drv command along with the updated sequence */
330 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
332 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
333 "wrote command (%x) to MFW MB param 0x%08x\n",
337 /* Wait for MFW response */
339 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
341 /* Give the FW up to 5 sec (500*1000 iterations of 10usec) */
342 } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
343 (cnt++ < ECORE_DRV_MB_MAX_RETRIES));
345 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
346 "[after %d ms] read (%x) seq is (%x) from FW MB\n",
347 cnt * delay, *o_mcp_resp, seq);
349 /* Is this a reply to our command? */
350 if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
351 *o_mcp_resp &= FW_MSG_CODE_MASK;
352 /* Get the MCP param */
353 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
/* Timeout: MFW never echoed our sequence back */
356 DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
360 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
/* Full mailbox transaction with optional union_data payload: copies
 * p_data_src into the shmem union before the command, runs the command
 * under the mailbox lock, and copies the union back into p_data_dst
 * afterwards (regardless of payload direction requested).
 */
366 static enum _ecore_status_t
367 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
368 struct ecore_mcp_mb_params *p_mb_params)
371 enum _ecore_status_t rc;
373 /* MCP not initialized */
374 if (!ecore_mcp_is_init(p_hwfn)) {
375 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n")
379 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
380 OFFSETOF(struct public_drv_mb, union_data);
382 /* Ensure that only a single thread is accessing the mailbox at a
385 rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
386 if (rc != ECORE_SUCCESS)
/* Stage the request payload in shmem before issuing the command */
389 if (p_mb_params->p_data_src != OSAL_NULL)
390 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
391 p_mb_params->p_data_src,
392 sizeof(*p_mb_params->p_data_src));
394 rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
395 p_mb_params->param, &p_mb_params->mcp_resp,
396 &p_mb_params->mcp_param);
/* Fetch the response payload from shmem */
398 if (p_mb_params->p_data_dst != OSAL_NULL)
399 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
401 sizeof(*p_mb_params->p_data_dst));
403 ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
/* Simple (payload-less) mailbox command wrapper around
 * ecore_mcp_cmd_and_union(). On emulation, UNLOAD_REQ is short-circuited
 * by decrementing the local per-port load counter instead.
 */
408 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
409 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
410 u32 *o_mcp_resp, u32 *o_mcp_param)
412 struct ecore_mcp_mb_params mb_params;
413 enum _ecore_status_t rc;
416 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
417 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
419 loaded_port[p_hwfn->port_id]--;
420 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
423 return ECORE_SUCCESS;
426 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
428 mb_params.param = param;
429 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
430 if (rc != ECORE_SUCCESS)
433 *o_mcp_resp = mb_params.mcp_resp;
434 *o_mcp_param = mb_params.mcp_param;
436 return ECORE_SUCCESS;
/* NVM write command: sends a command whose payload (i_buf, i_txn_size
 * bytes) is carried in the shmem union_data.
 * NOTE(review): i_txn_size is copied into union_data without a bound
 * check - callers must guarantee it fits sizeof(union drv_union_data);
 * confirm all call sites.
 */
439 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
440 struct ecore_ptt *p_ptt,
445 u32 i_txn_size, u32 *i_buf)
447 struct ecore_mcp_mb_params mb_params;
448 union drv_union_data union_data;
449 enum _ecore_status_t rc;
451 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
453 mb_params.param = param;
454 OSAL_MEMCPY(&union_data.raw_data, i_buf, i_txn_size);
455 mb_params.p_data_src = &union_data;
456 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
457 if (rc != ECORE_SUCCESS)
460 *o_mcp_resp = mb_params.mcp_resp;
461 *o_mcp_param = mb_params.mcp_param;
463 return ECORE_SUCCESS;
/* NVM read command: runs the command under the mailbox spinlock and, on
 * success, reads the response payload dword-by-dword out of the shmem
 * union_data into o_buf; the payload size is returned by the MFW in
 * the mcp_param.
 */
466 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
467 struct ecore_ptt *p_ptt,
472 u32 *o_txn_size, u32 *o_buf)
474 enum _ecore_status_t rc;
477 /* MCP not initialized */
478 if (!ecore_mcp_is_init(p_hwfn)) {
479 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
483 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
484 rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
486 if (rc != ECORE_SUCCESS)
489 /* Get payload after operation completes successfully */
490 *o_txn_size = *o_mcp_param;
491 for (i = 0; i < *o_txn_size; i += 4)
492 o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt,
493 union_data.raw_data[i]);
496 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
/* Emulation-only replacement for the MFW's load arbitration: pick the
 * load phase (engine/port/function) from local static counters, since
 * no MFW exists to do it. On CMT devices always report ENGINE.
 */
501 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
504 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
507 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
508 else if (!loaded_port[p_hwfn->port_id])
509 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
511 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
513 /* On CMT, always tell that it's engine */
514 if (p_hwfn->p_dev->num_hwfns > 1)
515 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
517 *p_load_code = load_phase;
519 loaded_port[p_hwfn->port_id]++;
521 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
522 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
523 *p_load_code, loaded, p_hwfn->port_id,
524 loaded_port[p_hwfn->port_id]);
/* Send LOAD_REQ to the MFW, carrying the driver version string in
 * union_data, and return the load phase granted by the MFW in
 * p_load_code. Fails if the MFW doesn't respond or refuses the load.
 */
528 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
529 struct ecore_ptt *p_ptt,
532 struct ecore_dev *p_dev = p_hwfn->p_dev;
533 struct ecore_mcp_mb_params mb_params;
534 union drv_union_data union_data;
536 enum _ecore_status_t rc;
/* No MFW on emulation - decide the load phase locally */
539 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
540 ecore_mcp_mf_workaround(p_hwfn, p_load_code);
541 return ECORE_SUCCESS;
545 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
546 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
547 mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
549 OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
550 mb_params.p_data_src = &union_data;
551 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
553 /* if mcp fails to respond we must abort */
554 if (rc != ECORE_SUCCESS) {
555 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
559 /* If MFW refused (e.g. other port is in diagnostic mode) we
560 * must abort. This can happen in the following cases:
561 * - Other port is in diagnostic mode
562 * - Previously loaded function on the engine is not compliant with
564 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
567 if (!(*p_load_code) ||
568 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
569 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
570 ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
571 DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
575 return ECORE_SUCCESS;
/* MFW notified that VFs were FLR-ed: read the disabled-VF bitmap from
 * the per-path public shmem section, mark the affected VFs, and kick
 * the OSAL FLR update if any were newly marked.
 */
578 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
579 struct ecore_ptt *p_ptt)
581 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
583 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
584 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
585 ECORE_PATH_ID(p_hwfn));
586 u32 disabled_vfs[VF_MAX_STATIC / 32];
589 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
590 "Reading Disabled VF information from [offset %08x],"
592 mfw_path_offsize, path_addr);
594 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
595 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
597 OFFSETOF(struct public_path,
600 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
601 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
602 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
605 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
606 OSAL_VF_FLR_UPDATE(p_hwfn);
/* Acknowledge completed VF FLR handling to the MFW: send the ack bitmap
 * via VF_DISABLED_DONE, then (temporary workaround) clear the ack bits
 * in shmem ourselves since the MFW doesn't yet.
 */
609 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
610 struct ecore_ptt *p_ptt,
613 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
615 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
616 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
618 struct ecore_mcp_mb_params mb_params;
619 union drv_union_data union_data;
621 enum _ecore_status_t rc;
624 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
625 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
626 "Acking VFs [%08x,...,%08x] - %08x\n",
627 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
629 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
630 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
631 OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
632 mb_params.p_data_src = &union_data;
633 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
634 if (rc != ECORE_SUCCESS) {
635 DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
636 "Failed to pass ACK for VF flr to MFW\n");
637 return ECORE_TIMEOUT;
640 /* TMP - clear the ACK bits; should be done by MFW */
641 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
642 ecore_wr(p_hwfn, p_ptt,
644 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
/* MFW notified a transceiver state change: read the state word from the
 * per-port shmem section and log whether the module is present.
 */
650 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
651 struct ecore_ptt *p_ptt)
653 u32 transceiver_state;
655 transceiver_state = ecore_rd(p_hwfn, p_ptt,
656 p_hwfn->mcp_info->port_addr +
657 OFFSETOF(struct public_port,
660 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
661 "Received transceiver state update [0x%08x] from mfw"
663 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
664 OFFSETOF(struct public_port,
667 transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE);
669 if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
670 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
672 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
/* Process a link-status update from the MFW: read the per-port
 * link_status word, decode it into mcp_info->link_output (speed, duplex,
 * autoneg, partner abilities, flow control), re-apply min/max bandwidth
 * corrections, and notify the OS layer. With b_reset, just clear the
 * link indications instead.
 */
675 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
676 struct ecore_ptt *p_ptt, bool b_reset)
678 struct ecore_mcp_link_state *p_link;
681 p_link = &p_hwfn->mcp_info->link_output;
682 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
684 status = ecore_rd(p_hwfn, p_ptt,
685 p_hwfn->mcp_info->port_addr +
686 OFFSETOF(struct public_port, link_status));
687 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
688 "Received link update [0x%08x] from mfw"
690 status, (u32)(p_hwfn->mcp_info->port_addr +
691 OFFSETOF(struct public_port,
694 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
695 "Resetting link indications\n");
/* Only report link-up after the driver itself initialized the link */
699 if (p_hwfn->b_drv_link_init)
700 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
702 p_link->link_up = false;
/* Decode negotiated speed/duplex from the status word */
704 p_link->full_duplex = true;
705 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
706 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
707 p_link->speed = 100000;
709 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
710 p_link->speed = 50000;
712 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
713 p_link->speed = 40000;
715 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
716 p_link->speed = 25000;
718 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
719 p_link->speed = 20000;
721 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
722 p_link->speed = 10000;
724 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
725 p_link->full_duplex = false;
/* fallthrough - 1000THD shares the 1G speed with 1000TFD */
727 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
728 p_link->speed = 1000;
734 /* We never store total line speed as p_link->speed is
735 * again changes according to bandwidth allocation.
737 if (p_link->link_up && p_link->speed)
738 p_link->line_speed = p_link->speed;
740 p_link->line_speed = 0;
742 /* Correct speed according to bandwidth allocation */
743 if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
744 u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
746 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
750 if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) {
751 u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
753 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
756 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
757 p_link->min_pf_rate);
/* Autoneg / partner-advertised abilities */
760 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
761 p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
762 p_link->parallel_detection = !!(status &
763 LINK_STATUS_PARALLEL_DETECTION_USED);
764 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
766 p_link->partner_adv_speed |=
767 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
768 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
769 p_link->partner_adv_speed |=
770 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
771 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
772 p_link->partner_adv_speed |=
773 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
774 ECORE_LINK_PARTNER_SPEED_10G : 0;
775 p_link->partner_adv_speed |=
776 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
777 ECORE_LINK_PARTNER_SPEED_20G : 0;
778 p_link->partner_adv_speed |=
779 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
780 ECORE_LINK_PARTNER_SPEED_25G : 0;
781 p_link->partner_adv_speed |=
782 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
783 ECORE_LINK_PARTNER_SPEED_40G : 0;
784 p_link->partner_adv_speed |=
785 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
786 ECORE_LINK_PARTNER_SPEED_50G : 0;
787 p_link->partner_adv_speed |=
788 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
789 ECORE_LINK_PARTNER_SPEED_100G : 0;
791 p_link->partner_tx_flow_ctrl_en =
792 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
793 p_link->partner_rx_flow_ctrl_en =
794 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
796 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
797 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
798 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
800 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
801 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
803 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
804 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
807 p_link->partner_adv_pause = 0;
810 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
813 ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);
815 OSAL_LINK_UPDATE(p_hwfn);
/* Push the driver's link configuration (speed, pause, advertised
 * speeds, loopback) to the MFW via INIT_PHY, or reset the link with
 * LINK_RESET when b_up is false. No-op on emulation.
 */
818 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
819 struct ecore_ptt *p_ptt, bool b_up)
821 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
822 struct ecore_mcp_mb_params mb_params;
823 union drv_union_data union_data;
824 struct pmm_phy_cfg *p_phy_cfg;
825 u32 param = 0, reply = 0, cmd;
826 enum _ecore_status_t rc = ECORE_SUCCESS;
829 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
830 return ECORE_SUCCESS;
833 /* Set the shmem configuration according to params */
834 p_phy_cfg = &union_data.drv_phy_cfg;
835 OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
836 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
837 if (!params->speed.autoneg)
838 p_phy_cfg->speed = params->speed.forced_speed;
839 p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
840 p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
841 p_phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
842 p_phy_cfg->adv_speed = params->speed.advertised_speeds;
843 p_phy_cfg->loopback_mode = params->loopback_mode;
/* FPGA platforms only support a fixed loopback setup */
846 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
848 "Link on FPGA - Ask for loopback mode '5' at 10G\n");
849 p_phy_cfg->loopback_mode = 5;
850 p_phy_cfg->speed = 10000;
/* Remember whether the driver initialized the link, for
 * ecore_mcp_handle_link_change()
 */
854 p_hwfn->b_drv_link_init = b_up;
857 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
858 "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
859 " adv_speed 0x%08x, loopback 0x%08x,"
860 " features 0x%08x\n",
861 p_phy_cfg->speed, p_phy_cfg->pause,
862 p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
863 p_phy_cfg->feature_config_flags);
865 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
867 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
869 mb_params.p_data_src = &union_data;
870 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
872 /* if mcp fails to respond we must abort */
873 if (rc != ECORE_SUCCESS) {
874 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
878 /* Reset the link status if needed */
880 ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
/* Read the process-kill counter from the per-path public shmem section.
 * Returns the masked counter value (not supported for VFs yet).
 */
885 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
886 struct ecore_ptt *p_ptt)
888 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
890 /* TODO - Add support for VFs */
891 if (IS_VF(p_hwfn->p_dev))
894 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
896 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
897 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
899 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
901 OFFSETOF(struct public_path, process_kill)) &
902 PROCESS_KILL_COUNTER_MASK;
904 return proc_kill_cnt;
/* Handle a process-kill (error recovery) indication from the MFW:
 * disable interrupts, and - once per device, on the leading hwfn -
 * flag recovery in progress and schedule the recovery handler.
 */
907 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
908 struct ecore_ptt *p_ptt)
910 struct ecore_dev *p_dev = p_hwfn->p_dev;
913 /* Prevent possible attentions/interrupts during the recovery handling
914 * and till its load phase, during which they will be re-enabled.
916 ecore_int_igu_disable_int(p_hwfn, p_ptt);
918 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
920 /* The following operations should be done once, and thus in CMT mode
921 * are carried out by only the first HW function.
923 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
926 if (p_dev->recov_in_prog) {
927 DP_NOTICE(p_hwfn, false,
928 "Ignoring the indication since a recovery"
929 " process is already in progress\n");
933 p_dev->recov_in_prog = true;
935 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
936 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
938 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
/* MFW requested protocol statistics: collect them from the OS layer and
 * send them back via GET_STATS with the stats struct in union_data.
 * Only LAN stats are handled here; other types are rejected.
 */
941 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
942 struct ecore_ptt *p_ptt,
943 enum MFW_DRV_MSG_TYPE type)
945 enum ecore_mcp_protocol_type stats_type;
946 union ecore_mcp_protocol_stats stats;
947 struct ecore_mcp_mb_params mb_params;
948 u32 hsi_param, param = 0, reply = 0;
949 union drv_union_data union_data;
952 case MFW_DRV_MSG_GET_LAN_STATS:
953 stats_type = ECORE_MCP_LAN_STATS;
954 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
957 DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
961 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
963 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
964 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
965 mb_params.param = hsi_param;
966 OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
967 mb_params.p_data_src = &union_data;
968 ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* Copy the per-PF public_func shmem section for the given pfid into
 * p_data, dword by dword, bounded by the smaller of the struct size and
 * the actual shmem section size.
 */
971 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
972 struct ecore_ptt *p_ptt,
973 struct public_func *p_data, int pfid)
975 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
977 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
978 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
981 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
983 size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize));
984 for (i = 0; i < size / sizeof(u32); i++)
985 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
986 func_addr + (i << 2));
/* Extract the PF's min/max bandwidth percentages from the shmem config
 * word into mcp_info->func_info, clamping each into the valid 1..100
 * range (see the TODO below about MFW-side enforcement).
 */
992 ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
993 struct public_func *p_shmem_info)
995 struct ecore_mcp_function_info *p_info;
997 p_info = &p_hwfn->mcp_info->func_info;
999 /* TODO - bandwidth min/max should have valid values of 1-100,
1000 * as well as some indication that the feature is disabled.
1001 * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
1002 * limit and correct value to min `1' and max `100' if limit isn't in
1005 p_info->bandwidth_min = (p_shmem_info->config &
1006 FUNC_MF_CFG_MIN_BW_MASK) >>
1007 FUNC_MF_CFG_MIN_BW_SHIFT;
1008 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1010 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1011 p_info->bandwidth_min);
1012 p_info->bandwidth_min = 1;
1015 p_info->bandwidth_max = (p_shmem_info->config &
1016 FUNC_MF_CFG_MAX_BW_MASK) >>
1017 FUNC_MF_CFG_MAX_BW_SHIFT;
1018 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1020 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1021 p_info->bandwidth_max);
1022 p_info->bandwidth_max = 100;
/* Handle a BW_UPDATE notification: re-read this PF's bandwidth limits
 * from shmem, apply them, and ACK the MFW with BW_UPDATE_ACK.
 */
1027 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1029 struct ecore_mcp_function_info *p_info;
1030 struct public_func shmem_info;
1031 u32 resp = 0, param = 0;
1033 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1035 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1037 p_info = &p_hwfn->mcp_info->func_info;
1039 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1041 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1043 /* Acknowledge the MFW */
1044 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
/* Handle a fan-failure notification: log it and escalate through the
 * HW-error notification path (once per device in CMT mode).
 */
1048 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1049 struct ecore_ptt *p_ptt)
1051 /* A single notification should be sent to upper driver in CMT mode */
1052 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1055 DP_NOTICE(p_hwfn, false,
1056 "Fan failure was detected on the network interface card"
1057 " and it's going to be shut down.\n");
1059 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
/* Main MFW event dispatcher: re-read the MFW mailbox, compare against
 * the shadow copy to find new messages, dispatch each to its handler,
 * write the ACK dwords back to shmem (big-endian), then refresh the
 * shadow copy.
 */
1062 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1063 struct ecore_ptt *p_ptt)
1065 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1066 enum _ecore_status_t rc = ECORE_SUCCESS;
1070 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1072 /* Read Messages from MFW */
1073 ecore_mcp_read_mb(p_hwfn, p_ptt);
1075 /* Compare current messages to old ones */
1076 for (i = 0; i < info->mfw_mb_length; i++) {
1077 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1082 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1083 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1084 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1087 case MFW_DRV_MSG_LINK_CHANGE:
1088 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1090 case MFW_DRV_MSG_VF_DISABLED:
1091 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1093 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1094 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1095 ECORE_DCBX_REMOTE_LLDP_MIB);
1097 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1098 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1099 ECORE_DCBX_REMOTE_MIB);
1101 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1102 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1103 ECORE_DCBX_OPERATIONAL_MIB);
1105 case MFW_DRV_MSG_ERROR_RECOVERY:
1106 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1108 case MFW_DRV_MSG_GET_LAN_STATS:
1109 case MFW_DRV_MSG_GET_FCOE_STATS:
1110 case MFW_DRV_MSG_GET_ISCSI_STATS:
1111 case MFW_DRV_MSG_GET_RDMA_STATS:
1112 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i)
1114 case MFW_DRV_MSG_BW_UPDATE:
1115 ecore_mcp_update_bw(p_hwfn, p_ptt);
1117 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1118 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1120 case MFW_DRV_MSG_FAILURE_DETECTED:
1121 ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1125 DP_NOTICE(p_hwfn, false,
1126 "Unimplemented MFW message %d\n", i);
1131 /* ACK everything */
1132 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1133 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
1135 /* MFW expect answer in BE, so we force write in that format */
1136 ecore_wr(p_hwfn, p_ptt,
1137 info->mfw_mb_addr + sizeof(u32) +
1138 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1139 sizeof(u32) + i * sizeof(u32), val);
1143 DP_NOTICE(p_hwfn, false,
1144 "Received an MFW message indication but no"
1149 /* Copy the new mfw messages into the shadow */
/* NOTE(review): this copies mfw_mb_length bytes, while mfw_mb_shadow
 * was allocated as MFW_DRV_MSG_MAX_DWORDS(mfw_mb_length) * sizeof(u32)
 * bytes (see ecore_mcp_cmd_init) - confirm the two sizes agree, or the
 * copy should use the dword-based size to avoid overflowing the shadow.
 */
1150 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
/* Return the MFW version (and optionally the running bundle id). For a
 * VF the version comes from the ACQUIRE response; for a PF it is read
 * from the global public shmem section. Not available on emulation.
 */
1155 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
1156 struct ecore_ptt *p_ptt,
1158 u32 *p_running_bundle_id)
1160 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1164 if (CHIP_REV_IS_EMUL(p_dev)) {
1165 DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n");
1166 return ECORE_SUCCESS;
/* VF path: the PF reported the MFW version during ACQUIRE */
1171 if (p_hwfn->vf_iov_info) {
1172 struct pfvf_acquire_resp_tlv *p_resp;
1174 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1175 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1176 return ECORE_SUCCESS;
1179 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
1180 "VF requested MFW vers prior to ACQUIRE\n");
1184 global_offsize = ecore_rd(p_hwfn, p_ptt,
1185 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1189 ecore_rd(p_hwfn, p_ptt,
1190 SECTION_ADDR(global_offsize,
1191 0) + OFFSETOF(struct public_global, mfw_ver));
1193 if (p_running_bundle_id != OSAL_NULL) {
1194 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1195 SECTION_ADDR(global_offsize,
1197 OFFSETOF(struct public_global,
1198 running_bundle_id));
1201 return ECORE_SUCCESS;
/* Read the media type of the port from the 'port' shmem section.
 * NOTE(review): the u32 *p_media_type parameter line and several
 * error-return lines appear elided in this extract.
 */
1204 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1207 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1208 struct ecore_ptt *p_ptt;
1210 /* TODO - Add support for VFs */
1214 if (!ecore_mcp_is_init(p_hwfn)) {
1215 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
/* Default until successfully read from shmem */
1219 *p_media_type = MEDIA_UNSPECIFIED;
1221 p_ptt = ecore_ptt_acquire(p_hwfn);
1225 *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1226 OFFSETOF(struct public_port, media_type));
1228 ecore_ptt_release(p_hwfn, p_ptt);
1230 return ECORE_SUCCESS;
/* Translate the FUNC_MF_CFG protocol field from shmem into an ecore
 * personality. Only the Ethernet case is visible in this extract;
 * presumably unknown protocols yield a failure code - confirm.
 */
1233 static enum _ecore_status_t
1234 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1235 struct public_func *p_info,
1236 enum ecore_pci_personality *p_proto)
1238 enum _ecore_status_t rc = ECORE_SUCCESS;
1240 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1241 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1242 *p_proto = ECORE_PCI_ETH;
/* Populate p_hwfn->mcp_info->func_info from the function's shmem entry:
 * pause-on-host flag, personality, bandwidth limits, MAC and outer vlan.
 */
1251 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1252 struct ecore_ptt *p_ptt)
1254 struct ecore_mcp_function_info *info;
1255 struct public_func shmem_info;
1257 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1258 info = &p_hwfn->mcp_info->func_info;
1260 info->pause_on_host = (shmem_info.config &
1261 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1263 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
1264 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1265 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1269 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
/* MAC is stored in shmem as two words: upper 16 bits + lower 32 bits */
1271 if (shmem_info.mac_upper || shmem_info.mac_lower) {
1272 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1273 info->mac[1] = (u8)(shmem_info.mac_upper);
1274 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1275 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1276 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1277 info->mac[5] = (u8)(shmem_info.mac_lower);
1279 /* TODO - are there protocols for which there's no MAC? */
1280 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1283 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1285 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1286 "Read configuration from shmem: pause_on_host %02x"
1287 " protocol %02x BW [%02x - %02x]"
1288 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %" PRIx64
1289 " node %" PRIx64 " ovlan %04x\n",
1290 info->pause_on_host, info->protocol,
1291 info->bandwidth_min, info->bandwidth_max,
1292 info->mac[0], info->mac[1], info->mac[2],
1293 info->mac[3], info->mac[4], info->mac[5],
1294 info->wwn_port, info->wwn_node, info->ovlan);
1296 return ECORE_SUCCESS;
/* Accessor for the requested (input) link configuration.
 * Presumably returns OSAL_NULL when mcp_info is absent - the
 * early-return line is elided in this extract.
 */
1299 struct ecore_mcp_link_params
1300 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1302 if (!p_hwfn || !p_hwfn->mcp_info)
1304 return &p_hwfn->mcp_info->link_input;
/* Accessor for the current (output) link state. On non-ASIC platforms
 * the link is forced up, since there is no MFW to report it.
 */
1307 struct ecore_mcp_link_state
1308 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1310 if (!p_hwfn || !p_hwfn->mcp_info)
1314 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1315 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1316 p_hwfn->mcp_info->link_output.link_up = true;
1320 return &p_hwfn->mcp_info->link_output;
/* Accessor for the link capabilities advertised by the MFW. */
1323 struct ecore_mcp_link_capabilities
1324 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1326 if (!p_hwfn || !p_hwfn->mcp_info)
1328 return &p_hwfn->mcp_info->link_capabilities;
/* Ask the MFW to drain the NIG (param 100 is the drain period passed to
 * the mailbox); the wait-for-completion lines are elided in this extract.
 */
1331 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1332 struct ecore_ptt *p_ptt)
1334 enum _ecore_status_t rc;
1335 u32 resp = 0, param = 0;
1337 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
1338 DRV_MSG_CODE_NIG_DRAIN, 100, &resp, ¶m);
1340 /* Wait for the drain to complete before returning */
/* Accessor for the per-function info cached by
 * ecore_mcp_fill_shmem_func_info().
 */
1346 const struct ecore_mcp_function_info
1347 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1349 if (!p_hwfn || !p_hwfn->mcp_info)
1351 return &p_hwfn->mcp_info->func_info;
/* Dispatch an NVM mailbox request according to params->type:
 * ECORE_MCP_NVM_RD uses the buffer-read helper, ECORE_MCP_CMD a plain
 * mailbox command, and ECORE_MCP_NVM_WR the buffer-write helper.
 * The response/param are returned inside params->nvm_common.
 */
1354 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1355 struct ecore_ptt *p_ptt,
1356 struct ecore_mcp_nvm_params *params)
1358 enum _ecore_status_t rc;
1360 switch (params->type) {
1361 case ECORE_MCP_NVM_RD:
1362 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1363 params->nvm_common.offset,
1364 ¶ms->nvm_common.resp,
1365 ¶ms->nvm_common.param,
1366 params->nvm_rd.buf_size,
1367 params->nvm_rd.buf);
1370 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1371 params->nvm_common.offset,
1372 ¶ms->nvm_common.resp,
1373 ¶ms->nvm_common.param);
1375 case ECORE_MCP_NVM_WR:
1376 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1377 params->nvm_common.offset,
1378 ¶ms->nvm_common.resp,
1379 ¶ms->nvm_common.param,
1380 params->nvm_wr.buf_size,
1381 params->nvm_wr.buf);
/* Count the engine's visible PFs whose personality matches the given
 * bitmask of personalities; hidden PFs and PFs with an unknown protocol
 * are skipped. The count++ / return lines are elided in this extract.
 */
1390 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1391 struct ecore_ptt *p_ptt, u32 personalities)
1393 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1394 struct public_func shmem_info;
1395 int i, count = 0, num_pfs;
1397 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1399 for (i = 0; i < num_pfs; i++) {
1400 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1401 MCP_PF_ID_BY_REL(p_hwfn, i));
1402 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1405 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
1406 &protocol) != ECORE_SUCCESS)
1409 if ((1 << ((u32)protocol)) & personalities)
/* Report the NVM flash size in bytes, decoded from MCP_REG_NVM_CFG4.
 * The register encodes size in Mbit as a power of two, hence the
 * MCP_BYTES_PER_MBIT_SHIFT (17) adjustment to get bytes.
 */
1416 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1417 struct ecore_ptt *p_ptt,
1423 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1424 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
/* VFs have no direct access to the NVM config registers */
1429 if (IS_VF(p_hwfn->p_dev))
1432 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1433 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1434 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1435 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1437 *p_flash_size = flash_size;
1439 return ECORE_SUCCESS;
/* Trigger a device recovery (process-kill) flow by firing general
 * attention 35, unless a recovery is already in progress.
 */
1442 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1443 struct ecore_ptt *p_ptt)
1445 struct ecore_dev *p_dev = p_hwfn->p_dev;
1447 if (p_dev->recov_in_prog) {
1448 DP_NOTICE(p_hwfn, false,
1449 "Avoid triggering a recovery since such a process"
1450 " is already in progress\n");
1454 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
/* Writing 1 to the attention register kicks off the recovery flow */
1455 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1457 return ECORE_SUCCESS;
/* Request the MFW to configure the number of MSI-X vectors ('num',
 * parameter line elided in this extract) for the given VF.
 */
1460 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1461 struct ecore_ptt *p_ptt,
1464 u32 resp = 0, param = 0, rc_param = 0;
1465 enum _ecore_status_t rc;
/* Pack VF id and SB count into the mailbox parameter */
1467 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1468 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1469 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1470 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1472 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1475 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1476 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
/* Send the driver version string and number to the MFW via the
 * SET_VERSION mailbox; the name is copied word-by-word as BE32 since
 * the MFW expects big-endian data.
 */
1484 enum _ecore_status_t
1485 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1486 struct ecore_mcp_drv_version *p_ver)
1488 u32 param = 0, reply = 0, num_words, i;
1489 struct drv_version_stc *p_drv_version;
1490 struct ecore_mcp_mb_params mb_params;
1491 union drv_union_data union_data;
1494 enum _ecore_status_t rc;
/* No MFW on slow (emulation/FPGA) platforms - nothing to report */
1497 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
1498 return ECORE_SUCCESS;
1501 p_drv_version = &union_data.drv_version;
1502 p_drv_version->version = p_ver->version;
1503 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
1504 for (i = 0; i < num_words; i++) {
1505 p_name = &p_ver->name[i * sizeof(u32)];
1506 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
1507 *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
1510 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1511 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
1512 mb_params.p_data_src = &union_data;
1513 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1514 if (rc != ECORE_SUCCESS)
1515 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
/* Ask the MCP processor to halt itself (used e.g. before ungraceful
 * teardown); the final return line is elided in this extract.
 */
1520 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
1521 struct ecore_ptt *p_ptt)
1523 enum _ecore_status_t rc;
1524 u32 resp = 0, param = 0;
1526 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
1528 if (rc != ECORE_SUCCESS)
1529 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
/* Resume a previously halted MCP by clearing the soft-halt bit; the
 * mode register is read back to verify the bit actually cleared.
 * NOTE(review): returns -1 (not an ECORE_* constant) on failure.
 */
1534 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
1535 struct ecore_ptt *p_ptt)
1537 u32 value, cpu_mode;
/* Clear all pending CPU state bits before releasing the halt */
1539 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
1541 value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1542 value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
1543 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
1544 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1546 return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
1549 enum _ecore_status_t
1550 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
1551 struct ecore_ptt *p_ptt,
1552 enum ecore_ov_config_method config,
1553 enum ecore_ov_client client)
1555 enum _ecore_status_t rc;
1556 u32 resp = 0, param = 0;
1560 case ECORE_OV_CLIENT_DRV:
1561 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
1563 case ECORE_OV_CLIENT_USER:
1564 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
1567 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config);
1571 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
1572 drv_mb_param, &resp, ¶m);
1573 if (rc != ECORE_SUCCESS)
1574 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
/* Report the driver's load state (not loaded / disabled / active) to
 * the MFW via the OV_UPDATE_DRIVER_STATE mailbox.
 * NOTE(review): the mailbox sends 'drv_state' directly while
 * 'drv_mb_param' is computed but apparently unused - verify intent.
 */
1579 enum _ecore_status_t
1580 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
1581 struct ecore_ptt *p_ptt,
1582 enum ecore_ov_driver_state drv_state)
1584 enum _ecore_status_t rc;
1585 u32 resp = 0, param = 0;
1588 switch (drv_state) {
1589 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
1590 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
1592 case ECORE_OV_DRIVER_STATE_DISABLED:
1593 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
1595 case ECORE_OV_DRIVER_STATE_ACTIVE:
1596 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
1599 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
1603 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
1604 drv_state, &resp, ¶m);
1605 if (rc != ECORE_SUCCESS)
1606 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
/* FC NPIV table retrieval - stub; the body is elided in this extract
 * (presumably returns a not-implemented/success status - confirm).
 */
1611 enum _ecore_status_t
1612 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1613 struct ecore_fc_npiv_tbl *p_table)
/* MTU update notification towards the MFW - stub; body elided in this
 * extract (presumably returns a status constant - confirm).
 */
1618 enum _ecore_status_t
1619 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
1620 struct ecore_ptt *p_ptt, u16 mtu)
/* Set the port LED mode (on / off / restore hardware control) through
 * the SET_LED_MODE mailbox command.
 */
1625 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
1626 struct ecore_ptt *p_ptt,
1627 enum ecore_led_mode mode)
1629 u32 resp = 0, param = 0, drv_mb_param;
1630 enum _ecore_status_t rc;
1633 case ECORE_LED_MODE_ON:
1634 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
1636 case ECORE_LED_MODE_OFF:
1637 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
1639 case ECORE_LED_MODE_RESTORE:
/* 'OPER' hands LED control back to the MFW */
1640 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
1643 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
1647 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
1648 drv_mb_param, &resp, ¶m);
1649 if (rc != ECORE_SUCCESS)
1650 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
/* Ask the MFW to mask parity attentions ('mask_parities' parameter line
 * elided in this extract). An old MFW that doesn't know the command is
 * reported but tolerated.
 */
1655 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
1656 struct ecore_ptt *p_ptt,
1659 enum _ecore_status_t rc;
1660 u32 resp = 0, param = 0;
1662 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
1663 mask_parities, &resp, ¶m);
1665 if (rc != ECORE_SUCCESS) {
1667 "MCP response failure for mask parities, aborting\n");
1668 } else if (resp != FW_MSG_CODE_OK) {
1670 "MCP did not ack mask parity request. Old MFW?\n");
/* Read 'len' bytes from NVM at 'addr' into p_buf, in chunks of at most
 * MCP_DRV_NVM_BUF_LEN per mailbox transaction.
 * NOTE(review): the bytes_left/offset initialization lines are elided
 * in this extract - confirm against the full file.
 */
1677 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
1680 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1681 u32 bytes_left, offset, bytes_to_copy, buf_size;
1682 struct ecore_mcp_nvm_params params;
1683 struct ecore_ptt *p_ptt;
1684 enum _ecore_status_t rc = ECORE_SUCCESS;
1686 p_ptt = ecore_ptt_acquire(p_hwfn);
1690 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1693 params.type = ECORE_MCP_NVM_RD;
1694 params.nvm_rd.buf_size = &buf_size;
1695 params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
1696 while (bytes_left > 0) {
1697 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
1698 MCP_DRV_NVM_BUF_LEN);
/* Offset and chunk length travel together in one mailbox param */
1699 params.nvm_common.offset = (addr + offset) |
1700 (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
1701 params.nvm_rd.buf = (u32 *)(p_buf + offset);
1702 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1703 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
1704 FW_MSG_CODE_NVM_OK)) {
1705 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
/* Advance by the number of bytes the MFW actually returned */
1708 offset += *params.nvm_rd.buf_size;
1709 bytes_left -= *params.nvm_rd.buf_size;
1712 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1713 ecore_ptt_release(p_hwfn, p_ptt);
/* Read PHY data (core or raw, depending on 'cmd') into p_buf via the
 * NVM mailbox read helper.
 */
1718 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
1719 u32 addr, u8 *p_buf, u32 len)
1721 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1722 struct ecore_mcp_nvm_params params;
1723 struct ecore_ptt *p_ptt;
1724 enum _ecore_status_t rc;
1726 p_ptt = ecore_ptt_acquire(p_hwfn);
1730 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1731 params.type = ECORE_MCP_NVM_RD;
1732 params.nvm_rd.buf_size = &len;
1733 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
1734 DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
1735 params.nvm_common.offset = addr;
1736 params.nvm_rd.buf = (u32 *)p_buf;
1737 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1738 if (rc != ECORE_SUCCESS)
1739 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
/* Cache the last NVM response for ecore_mcp_nvm_resp() */
1741 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1742 ecore_ptt_release(p_hwfn, p_ptt);
/* Copy the response code of the most recent NVM mailbox command into
 * p_buf. NOTE(review): the acquired PTT and zeroed 'params' are not
 * actually used for the copy - the PTT acquire/release looks redundant.
 */
1747 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
1749 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1750 struct ecore_mcp_nvm_params params;
1751 struct ecore_ptt *p_ptt;
1753 p_ptt = ecore_ptt_acquire(p_hwfn);
1757 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1758 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
1759 ecore_ptt_release(p_hwfn, p_ptt);
1761 return ECORE_SUCCESS;
/* Delete an NVM file at 'addr' via the NVM_DEL_FILE mailbox command. */
1764 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
1766 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1767 struct ecore_mcp_nvm_params params;
1768 struct ecore_ptt *p_ptt;
1769 enum _ecore_status_t rc;
1771 p_ptt = ecore_ptt_acquire(p_hwfn);
1774 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1775 params.type = ECORE_MCP_CMD;
1776 params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
1777 params.nvm_common.offset = addr;
1778 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1779 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1780 ecore_ptt_release(p_hwfn, p_ptt);
/* Begin an NVM file-put transaction ('addr' identifies the file);
 * subsequent data is sent with ecore_mcp_nvm_write(ECORE_PUT_FILE_DATA).
 */
1785 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
1788 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1789 struct ecore_mcp_nvm_params params;
1790 struct ecore_ptt *p_ptt;
1791 enum _ecore_status_t rc;
1793 p_ptt = ecore_ptt_acquire(p_hwfn);
1796 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1797 params.type = ECORE_MCP_CMD;
1798 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
1799 params.nvm_common.offset = addr;
1800 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1801 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1802 ecore_ptt_release(p_hwfn, p_ptt);
1807 /* rc receives ECORE_INVAL as default parameter because
1808 * it might not enter the while loop if the len is 0
/* Write 'len' bytes from p_buf to NVM at 'addr', chunked to at most
 * MCP_DRV_NVM_BUF_LEN per mailbox transaction. 'cmd' selects between
 * plain NVRAM writes and PUT_FILE_DATA transfers.
 * NOTE(review): the buf_idx initialization line is elided in this
 * extract - confirm against the full file.
 */
1810 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
1811 u32 addr, u8 *p_buf, u32 len)
1813 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1814 enum _ecore_status_t rc = ECORE_INVAL;
1815 struct ecore_mcp_nvm_params params;
1816 struct ecore_ptt *p_ptt;
1817 u32 buf_idx, buf_size;
1819 p_ptt = ecore_ptt_acquire(p_hwfn);
1823 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1824 params.type = ECORE_MCP_NVM_WR;
1825 if (cmd == ECORE_PUT_FILE_DATA)
1826 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
1828 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
1830 while (buf_idx < len) {
1831 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
1832 MCP_DRV_NVM_BUF_LEN);
/* Chunk length and offset are packed into a single mailbox param */
1833 params.nvm_common.offset = ((buf_size <<
1834 DRV_MB_PARAM_NVM_LEN_SHIFT)
1836 params.nvm_wr.buf_size = buf_size;
1837 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
1838 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
/* PUT_FILE_FINISH_OK is also a legitimate terminal response */
1839 if (rc != ECORE_SUCCESS ||
1840 ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
1841 (params.nvm_common.resp !=
1842 FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
1843 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1845 buf_idx += buf_size;
1848 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1849 ecore_ptt_release(p_hwfn, p_ptt);
/* Write PHY data (core or raw, depending on 'cmd') from p_buf via the
 * NVM mailbox write helper.
 */
1854 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
1855 u32 addr, u8 *p_buf, u32 len)
1857 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1858 struct ecore_mcp_nvm_params params;
1859 struct ecore_ptt *p_ptt;
1860 enum _ecore_status_t rc;
1862 p_ptt = ecore_ptt_acquire(p_hwfn);
1866 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1867 params.type = ECORE_MCP_NVM_WR;
1868 params.nvm_wr.buf_size = len;
1869 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
1870 DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
1871 params.nvm_common.offset = addr;
1872 params.nvm_wr.buf = (u32 *)p_buf;
1873 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1874 if (rc != ECORE_SUCCESS)
1875 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1876 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1877 ecore_ptt_release(p_hwfn, p_ptt);
/* Set NVM secure mode for the region at 'addr' via the SET_SECURE_MODE
 * mailbox command ('addr' parameter line elided in this extract).
 */
1882 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
1885 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1886 struct ecore_mcp_nvm_params params;
1887 struct ecore_ptt *p_ptt;
1888 enum _ecore_status_t rc;
1890 p_ptt = ecore_ptt_acquire(p_hwfn);
1894 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
1895 params.type = ECORE_MCP_CMD;
1896 params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
1897 params.nvm_common.offset = addr;
1898 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1899 p_dev->mcp_nvm_resp = params.nvm_common.resp;
1900 ecore_ptt_release(p_hwfn, p_ptt);
/* Read from a transceiver (SFP) EEPROM over the MFW's I2C channel, in
 * chunks of at most MAX_I2C_TRANSACTION_SIZE.
 * NOTE(review): 'rc' is assigned but never checked - only the mailbox
 * response code is examined; a transport failure could go unnoticed.
 */
1905 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
1906 struct ecore_ptt *p_ptt,
1907 u32 port, u32 addr, u32 offset,
1910 struct ecore_mcp_nvm_params params;
1911 enum _ecore_status_t rc;
1912 u32 bytes_left, bytes_to_copy, buf_size;
1914 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
/* Port and I2C address are constant across all chunks */
1915 SET_FIELD(params.nvm_common.offset,
1916 DRV_MB_PARAM_TRANSCEIVER_PORT, port);
1917 SET_FIELD(params.nvm_common.offset,
1918 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
1922 params.type = ECORE_MCP_NVM_RD;
1923 params.nvm_rd.buf_size = &buf_size;
1924 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
1925 while (bytes_left > 0) {
1926 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
1927 MAX_I2C_TRANSACTION_SIZE);
1928 params.nvm_rd.buf = (u32 *)(p_buf + offset);
1929 SET_FIELD(params.nvm_common.offset,
1930 DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset);
1931 SET_FIELD(params.nvm_common.offset,
1932 DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy);
1933 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1934 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
1935 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
1937 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
1938 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
1939 return ECORE_UNKNOWN_ERROR;
/* Advance by the number of bytes the MFW actually returned */
1941 offset += *params.nvm_rd.buf_size;
1942 bytes_left -= *params.nvm_rd.buf_size;
1945 return ECORE_SUCCESS;
/* Write to a transceiver (SFP) EEPROM over the MFW's I2C channel, in
 * chunks of at most MAX_I2C_TRANSACTION_SIZE.
 * NOTE(review): as in the read path, 'rc' is assigned but never
 * checked - only the mailbox response code is examined.
 */
1948 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
1949 struct ecore_ptt *p_ptt,
1950 u32 port, u32 addr, u32 offset,
1953 struct ecore_mcp_nvm_params params;
1954 enum _ecore_status_t rc;
1955 u32 buf_idx, buf_size;
1957 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
/* Port and I2C address are constant across all chunks */
1958 SET_FIELD(params.nvm_common.offset,
1959 DRV_MB_PARAM_TRANSCEIVER_PORT, port);
1960 SET_FIELD(params.nvm_common.offset,
1961 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
1962 params.type = ECORE_MCP_NVM_WR;
1963 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
1965 while (buf_idx < len) {
1966 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
1967 MAX_I2C_TRANSACTION_SIZE);
1968 SET_FIELD(params.nvm_common.offset,
1969 DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx);
1970 SET_FIELD(params.nvm_common.offset,
1971 DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size);
1972 params.nvm_wr.buf_size = buf_size;
1973 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
1974 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
1975 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
1976 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
1978 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
1979 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
1980 return ECORE_UNKNOWN_ERROR;
1982 buf_idx += buf_size;
1985 return ECORE_SUCCESS;
1988 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
1989 struct ecore_ptt *p_ptt,
1990 u16 gpio, u32 *gpio_val)
1992 enum _ecore_status_t rc = ECORE_SUCCESS;
1993 u32 drv_mb_param = 0, rsp;
1995 SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
1997 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
1998 drv_mb_param, &rsp, gpio_val);
2000 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2001 return ECORE_UNKNOWN_ERROR;
2003 return ECORE_SUCCESS;
2006 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2007 struct ecore_ptt *p_ptt,
2008 u16 gpio, u16 gpio_val)
2010 enum _ecore_status_t rc = ECORE_SUCCESS;
2011 u32 drv_mb_param = 0, param, rsp;
2013 SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
2014 SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val);
2016 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2017 drv_mb_param, &rsp, ¶m);
2019 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2020 return ECORE_UNKNOWN_ERROR;
2022 return ECORE_SUCCESS;
/* Query the direction and control-ownership attributes of an
 * MFW-controlled GPIO. NOTE(review): uses a raw shift to build the
 * parameter where the read/write siblings use SET_FIELD - consider
 * unifying for consistency.
 */
2025 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2026 struct ecore_ptt *p_ptt,
2027 u16 gpio, u32 *gpio_direction,
2030 u32 drv_mb_param = 0, rsp, val = 0;
2031 enum _ecore_status_t rc = ECORE_SUCCESS;
2033 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
2035 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2036 drv_mb_param, &rsp, &val);
2037 if (rc != ECORE_SUCCESS)
/* Unpack direction and control fields from the returned value */
2040 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2041 DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
2042 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2043 DRV_MB_PARAM_GPIO_CTRL_SHIFT;
2045 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2046 return ECORE_UNKNOWN_ERROR;
2048 return ECORE_SUCCESS;
/* Run the MFW's built-in register self-test; both the mailbox response
 * and the returned param must indicate a pass.
 */
2051 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2052 struct ecore_ptt *p_ptt)
2054 u32 drv_mb_param = 0, rsp, param;
2055 enum _ecore_status_t rc = ECORE_SUCCESS;
2057 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2058 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2060 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2061 drv_mb_param, &rsp, ¶m);
2063 if (rc != ECORE_SUCCESS)
2066 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2067 (param != DRV_MB_PARAM_BIST_RC_PASSED))
2068 rc = ECORE_UNKNOWN_ERROR;
/* Run the MFW's built-in clock self-test; both the mailbox response
 * and the returned param must indicate a pass.
 */
2073 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2074 struct ecore_ptt *p_ptt)
2076 u32 drv_mb_param = 0, rsp, param;
2077 enum _ecore_status_t rc = ECORE_SUCCESS;
2079 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
2080 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2082 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2083 drv_mb_param, &rsp, ¶m);
2085 if (rc != ECORE_SUCCESS)
2088 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2089 (param != DRV_MB_PARAM_BIST_RC_PASSED))
2090 rc = ECORE_UNKNOWN_ERROR;
/* Query, via the BIST mailbox, how many NVM images are present; the
 * count is returned in *num_images (through the mailbox param slot).
 */
2095 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
2096 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
2098 u32 drv_mb_param = 0, rsp;
2099 enum _ecore_status_t rc = ECORE_SUCCESS;
2101 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
2102 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2104 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2105 drv_mb_param, &rsp, num_images);
2107 if (rc != ECORE_SUCCESS)
2110 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
2111 rc = ECORE_UNKNOWN_ERROR;
/* Retrieve the attributes (type, address, length, return code) of the
 * NVM image at 'image_index' via the BIST mailbox read path.
 */
2116 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
2117 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2118 struct bist_nvm_image_att *p_image_att, u32 image_index)
2120 struct ecore_mcp_nvm_params params;
2121 enum _ecore_status_t rc;
2124 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
/* Test selector and image index share the mailbox offset word */
2125 params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
2126 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2127 params.nvm_common.offset |= (image_index <<
2128 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
2130 params.type = ECORE_MCP_NVM_RD;
2131 params.nvm_rd.buf_size = &buf_size;
2132 params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
2133 params.nvm_rd.buf = (u32 *)p_image_att;
2135 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
2136 if (rc != ECORE_SUCCESS)
/* return_code of 1 inside the attribute struct means success */
2139 if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2140 (p_image_att->return_code != 1))
2141 rc = ECORE_UNKNOWN_ERROR;
/* Locate an NVM image of the requested kind and copy it into p_buffer:
 * translate image_id to an NVM type, enumerate images to find a match,
 * validate sizes, then read the image contents from NVM.
 */
2146 enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
2147 struct ecore_ptt *p_ptt,
2148 enum ecore_nvm_images image_id,
2149 char *p_buffer, u16 buffer_len)
2151 struct bist_nvm_image_att image_att;
2152 /* enum nvm_image_type type; */ /* @DPDK */
2155 enum _ecore_status_t rc;
2157 OSAL_MEM_ZERO(p_buffer, buffer_len);
2159 /* Translate image_id into MFW definitions */
2161 case ECORE_NVM_IMAGE_ISCSI_CFG:
2163 type = 0x1d; /* NVM_TYPE_ISCSI_CFG; */
2165 case ECORE_NVM_IMAGE_FCOE_CFG:
2166 type = 0x1f; /* NVM_TYPE_FCOE_CFG; */
2169 DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n",
2174 /* Learn number of images, then traverse and see if one fits */
2175 rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt,
2177 if ((rc != ECORE_SUCCESS) || (!num_images))
2180 for (i = 0; i < num_images; i++) {
2181 rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
2183 if (rc != ECORE_SUCCESS)
2186 if (type == image_att.image_type)
/* Loop ran to completion without a match */
2189 if (i == num_images) {
2190 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2191 "Failed to find nvram image of type %08x\n",
2196 /* Validate sizes - both the image's and the supplied buffer's */
2197 if (image_att.len <= 4) {
2198 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2199 "Image [%d] is too small - only %d bytes\n",
2200 image_id, image_att.len);
2204 /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */
2207 if (image_att.len > buffer_len) {
2208 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2209 "Image [%d] is too big - %08x bytes where only %08x are available\n",
2210 image_id, image_att.len, buffer_len);
2214 return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.nvm_start_addr,
2215 (u8 *)p_buffer, image_att.len);
/* Read the device temperature sensors from the MFW and unpack the
 * per-sensor fields (location, thresholds, current temperature) into
 * the caller's ecore_temperature_info structure.
 */
2218 enum _ecore_status_t
2219 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
2220 struct ecore_ptt *p_ptt,
2221 struct ecore_temperature_info *p_temp_info)
2223 struct ecore_temperature_sensor *p_temp_sensor;
2224 struct temperature_status_stc *p_mfw_temp_info;
2225 struct ecore_mcp_mb_params mb_params;
2226 union drv_union_data union_data;
2228 enum _ecore_status_t rc;
2231 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2232 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
2233 mb_params.p_data_dst = &union_data;
2234 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2235 if (rc != ECORE_SUCCESS)
2238 p_mfw_temp_info = &union_data.temp_info;
2240 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
/* Clamp to the driver's sensor array size */
2241 p_temp_info->num_sensors = OSAL_MIN_T(u32,
2242 p_mfw_temp_info->num_of_sensors,
2243 ECORE_MAX_NUM_OF_SENSORS);
2244 for (i = 0; i < p_temp_info->num_sensors; i++) {
2245 val = p_mfw_temp_info->sensor[i];
2246 p_temp_sensor = &p_temp_info->sensors[i];
2247 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
2248 SENSOR_LOCATION_SHIFT;
2249 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
2250 THRESHOLD_HIGH_SHIFT;
2251 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
2252 CRITICAL_TEMPERATURE_SHIFT;
2253 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
2257 return ECORE_SUCCESS;
/* Read the MBA (boot agent) version words from the MFW; the response
 * must be a full MCP_DRV_NVM_BUF_LEN buffer to be considered valid.
 */
2260 enum _ecore_status_t ecore_mcp_get_mba_versions(
2261 struct ecore_hwfn *p_hwfn,
2262 struct ecore_ptt *p_ptt,
2263 struct ecore_mba_vers *p_mba_vers)
2265 struct ecore_mcp_nvm_params params;
2266 enum _ecore_status_t rc;
2269 OSAL_MEM_ZERO(¶ms, sizeof(params));
2270 params.type = ECORE_MCP_NVM_RD;
2271 params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
2272 params.nvm_common.offset = 0;
2273 params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
2274 params.nvm_rd.buf_size = &buf_size;
2275 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
2277 if (rc != ECORE_SUCCESS)
2280 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2282 rc = ECORE_UNKNOWN_ERROR;
/* A short answer means the MFW didn't return the full version buffer */
2284 if (buf_size != MCP_DRV_NVM_BUF_LEN)
2285 rc = ECORE_UNKNOWN_ERROR;
/* Query the MFW for the number of memory ECC events; the count is
 * written into *num_events via the mailbox param slot.
 */
2290 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
2291 struct ecore_ptt *p_ptt,
2296 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
2297 0, &rsp, (u32 *)num_events);
/* Version of the driver<->MFW resource-allocation interface, packed
 * into the major/minor fields of the mailbox parameter word.
 */
2300 #define ECORE_RESC_ALLOC_VERSION_MAJOR 1
2301 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
2302 #define ECORE_RESC_ALLOC_VERSION \
2303 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
2304 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
2305 (ECORE_RESC_ALLOC_VERSION_MINOR << \
2306 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
/* Exchange resource-allocation information with the MFW: p_resc_info is
 * sent as the request and overwritten in place with the MFW's answer
 * (the same union buffer is used for both directions).
 */
2308 enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
2309 struct ecore_ptt *p_ptt,
2310 struct resource_info *p_resc_info,
2311 u32 *p_mcp_resp, u32 *p_mcp_param)
2313 struct ecore_mcp_mb_params mb_params;
2314 union drv_union_data *p_union_data;
2315 enum _ecore_status_t rc;
2317 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2318 mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
2319 mb_params.param = ECORE_RESC_ALLOC_VERSION;
/* Same buffer is used as both source and destination of the exchange */
2320 p_union_data = (union drv_union_data *)p_resc_info;
2321 mb_params.p_data_src = p_union_data;
2322 mb_params.p_data_dst = p_union_data;
2323 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2324 if (rc != ECORE_SUCCESS)
2327 *p_mcp_resp = mb_params.mcp_resp;
2328 *p_mcp_param = mb_params.mcp_param;
2330 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2331 "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
2332 *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
2333 p_resc_info->offset, p_resc_info->vf_size,
2334 p_resc_info->vf_offset, p_resc_info->flags);
2336 return ECORE_SUCCESS;