/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

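/* Check whether the MCP/MFW interface has been initialized for this HW
 * function, i.e. mcp_info was allocated and a valid shared-memory base
 * was read from the device.
 */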
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

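/* Derive the per-port section address inside the MFW public shared memory
 * and cache it in mcp_info->port_addr for later port-level accesses.
 */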
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

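/* Copy the current MFW-to-driver message mailbox from shared memory into
 * the local mfw_mb_cur buffer, converting each dword from big-endian.
 */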
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
	}
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
	p_hwfn->mcp_info = OSAL_NULL;

	return ECORE_SUCCESS;
}

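/* Read the shared-memory base from the device and compute the addresses of
 * the driver and MFW mailboxes for this PF, along with the initial mailbox
 * and pulse sequence numbers.
 */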
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
	    DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					 MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}

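/* Allocate and initialize the mcp_info structure, including the local
 * current/shadow copies of the MFW message mailbox and the mailbox spinlock.
 */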
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlock */
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
	OSAL_SPIN_LOCK_INIT(&p_info->lock);

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

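/* Ask the MFW to reset the MCP by writing DRV_MSG_CODE_MCP_RESET and
 * polling the generic POR register until its value changes or the retry
 * budget is exhausted.
 */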
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);

	return rc;
}

/* Should be called while the dedicated spinlock is acquired */
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 cmd, u32 param,
					     u32 *o_mcp_resp,
					     u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 seconds (500 * 1000 * 10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < ECORE_DRV_MB_MAX_RETRIES));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}

	return rc;
}

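/* Send a simple mailbox command (no union_data payload) to the MFW and
 * return its response code and parameter.
 */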
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, OSAL_NULL,
				       o_mcp_resp, o_mcp_param);
}

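/* Send a mailbox command to the MFW, optionally copying a union_data payload
 * into the driver mailbox first. Serialized by the per-function MCP lock.
 */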
enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 cmd, u32 param,
					     union drv_union_data *p_union_data,
					     u32 *o_mcp_resp,
					     u32 *o_mcp_param)
{
	u32 union_data_addr;
	enum _ecore_status_t rc;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	/* Acquiring a spinlock is needed to ensure that only a single thread
	 * is accessing the mailbox at a certain time.
	 */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	if (p_union_data != OSAL_NULL) {
		union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
		    OFFSETOF(struct public_drv_mb, union_data);
		ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, p_union_data,
				sizeof(*p_union_data));
	}

	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
			      o_mcp_param);

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	union drv_union_data union_data;

	OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, &union_data,
				       o_mcp_resp, o_mcp_param);
}

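/* Send a mailbox command and read back a response payload from the
 * union_data area of the driver mailbox once the command completes.
 */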
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	enum _ecore_status_t rc;
	u32 i;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
			      o_mcp_param);
	if (rc != ECORE_SUCCESS)
		goto out;

	/* Get payload after operation completes successfully */
	*o_txn_size = *o_mcp_param;
	for (i = 0; i < *o_txn_size; i += 4)
		o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt,
						   union_data.raw_data[i]);

out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);

	return rc;
}

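/* Emulation-only helper: fake the MFW load response by tracking engine/port
 * load counters locally, since no real MFW is present.
 */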
#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif

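/* Send a LOAD_REQ to the MFW and return the load phase (engine/port/function)
 * granted by the MFW in *p_load_code.
 */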
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					u32 *p_load_code)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union drv_union_data union_data;
	u32 param;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, p_load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
				     (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
				      p_dev->drv_type),
				     &union_data, p_load_code, &param);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* If MFW refused (e.g. other port is in diagnostic mode) we
	 * must abort. This can happen in the following cases:
	 * - Other port is in diagnostic mode
	 * - Previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return ECORE_BUSY;
	}

	return ECORE_SUCCESS;
}

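/* Read the bitmap of FLR-ed VFs from the path section of the MFW shared
 * memory and schedule the IOV FLR handling for any newly disabled VFs.
 */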
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);

		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	union drv_union_data union_data;
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
				     DRV_MSG_CODE_VF_DISABLED_DONE, 0,
				     &union_data, &resp, &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw"
		   " [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE);

	if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}

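/* Parse the link_status word reported by the MFW into the cached
 * ecore_mcp_link_state, apply min/max bandwidth corrections, and notify
 * the upper layer of the new link state.
 */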
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt, bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-Through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store the total line speed here, as p_link->speed is
	 * later adjusted according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	/* Correct speed according to bandwidth allocation */
	if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
		u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;

		__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						   p_link, max_bw);
	}

	if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) {
		u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

		__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						   p_link, min_bw);

		ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
						      p_link->min_pf_rate);
	}

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	OSAL_LINK_UPDATE(p_hwfn);
}

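/* Translate the driver's link_input parameters into a pmm_phy_cfg payload
 * and send INIT_PHY (link up) or LINK_RESET (link down) to the MFW.
 */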
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	union drv_union_data union_data;
	struct pmm_phy_cfg *p_phy_cfg;
	u32 param = 0, reply = 0, cmd;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	p_phy_cfg = &union_data.drv_phy_cfg;
	OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		p_phy_cfg->speed = params->speed.forced_speed;
	p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
	p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
	p_phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
	p_phy_cfg->adv_speed = params->speed.advertised_speeds;
	p_phy_cfg->loopback_mode = params->loopback_mode;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "Link on FPGA - Ask for loopback mode '5' at 10G\n");
		p_phy_cfg->loopback_mode = 5;
		p_phy_cfg->speed = 10000;
	}
#endif

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
			   " adv_speed 0x%08x, loopback 0x%08x,"
			   " features 0x%08x\n",
			   p_phy_cfg->speed, p_phy_cfg->pause,
			   p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
			   p_phy_cfg->feature_config_flags);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, 0, &union_data, &reply,
				     &param);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return rc;
}

u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return 0;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
	    PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery"
			  " process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	u32 hsi_param, param = 0, reply = 0;
	union drv_union_data union_data;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEMCPY(&union_data, &stats, sizeof(stats));

	ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS,
				hsi_param, &union_data, &reply, &param);
}

static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}

static void
ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
			struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume THERE IS ALWAYS
	 * a limit and correct the value to min `1' and max `100' if it isn't
	 * in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
	    FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
	    FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card"
		  " and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}

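/* Main MFW-to-driver notification handler: read the MFW mailbox, dispatch
 * each changed message to its handler, ACK all messages back to the MFW and
 * update the shadow copy.
 */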
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		default:
			DP_NOTICE(p_hwfn, false,
				  "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects the answer in BE, so force the write in that
		 * format.
		 */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
					   struct ecore_ptt *p_ptt,
					   u32 *p_mfw_ver,
					   u32 *p_running_bundle_id)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 global_offsize;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_dev)) {
		DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n");
		return ECORE_SUCCESS;
	}
#endif

	if (IS_VF(p_dev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return ECORE_SUCCESS;
		} else {
			DP_VERBOSE(p_dev, ECORE_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return ECORE_INVAL;
		}
	}

	global_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
						       public_base,
						       PUBLIC_GLOBAL));
	*p_mfw_ver =
	    ecore_rd(p_hwfn, p_ptt,
		     SECTION_ADDR(global_offsize,
				  0) + OFFSETOF(struct public_global, mfw_ver));

	if (p_running_bundle_id != OSAL_NULL) {
		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
						SECTION_ADDR(global_offsize,
							     0) +
						OFFSETOF(struct public_global,
							 running_bundle_id));
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
					      u32 *p_media_type)
{
	struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
	struct ecore_ptt *p_ptt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	*p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
				 OFFSETOF(struct public_port, media_type));

	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
			  struct public_func *p_info,
			  enum ecore_pci_personality *p_proto)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		*p_proto = ECORE_PCI_ETH;
		break;
	default:
		rc = ECORE_INVAL;
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *info;
	struct public_func shmem_info;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return ECORE_INVAL;
	}

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		/* TODO - are there protocols for which there's no MAC? */
		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
	}

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x"
		   " protocol %02x BW [%02x - %02x]"
		   " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
		   " node %lx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node, info->ovlan);

	return ECORE_SUCCESS;
}

struct ecore_mcp_link_params
*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct ecore_mcp_link_state
*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
		p_hwfn->mcp_info->link_output.link_up = true;
	}
#endif

	return &p_hwfn->mcp_info->link_output;
}

struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 100, &resp, &param);

	/* Wait for the drain to complete before returning */
	OSAL_MSLEEP(1020);

	return rc;
}

const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->func_info;
}

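/* Dispatch an NVM access request to the proper mailbox helper according to
 * the request type (plain command, NVM read or NVM write).
 */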
enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   struct ecore_mcp_nvm_params *params)
{
	enum _ecore_status_t rc;

	switch (params->type) {
	case ECORE_MCP_NVM_RD:
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
					  params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
					  params->nvm_rd.buf_size,
					  params->nvm_rd.buf);
		break;
	case ECORE_MCP_CMD:
		rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
				   params->nvm_common.offset,
				   &params->nvm_common.resp,
				   &params->nvm_common.param);
		break;
	case ECORE_MCP_NVM_WR:
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
					  params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
					  params->nvm_wr.buf_size,
					  params->nvm_wr.buf);
		break;
	default:
		rc = ECORE_NOTIMPL;
		break;
	}

	return rc;
}

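/* Count how many enabled PFs on this engine expose one of the requested
 * personalities (bitmask of ecore_pci_personality values).
 */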
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u32 personalities)
{
	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
	struct public_func shmem_info;
	int i, count = 0, num_pfs;

	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);

	for (i = 0; i < num_pfs; i++) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));
		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
					      &protocol) != ECORE_SUCCESS)
			continue;

		if ((1 << ((u32)protocol)) & personalities)
			count++;
	}

	return count;
}

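/* Read the NVM configuration register and convert the encoded flash size
 * field into a size in bytes.
 */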
enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_flash_size)
{
	u32 flash_size;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
		return ECORE_INVAL;
	}
#endif

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
	    MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Avoid triggering a recovery since such a process"
			  " is already in progress\n");
		return ECORE_AGAIN;
	}

	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return ECORE_SUCCESS;
}

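/* Request the MFW to configure the number of MSI-X vectors (status blocks)
 * for the given VF.
 */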
enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	enum _ecore_status_t rc;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
	    DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
	    DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			   &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
			  vf_id);
		rc = ECORE_INVAL;
	}

	return rc;
}

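/* Report the driver version string and numeric version to the MFW via the
 * SET_VERSION mailbox command.
 */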
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	u32 param = 0, reply = 0, num_words, i;
	struct drv_version_stc *p_drv_version;
	union drv_union_data union_data;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0,
				     &union_data, &reply, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 value, cpu_mode;

	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
}

enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   enum ecore_ov_config_method config,
				   enum ecore_ov_client client)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (client) {
	case ECORE_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case ECORE_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_ov_driver_state drv_state)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (drv_state) {
	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case ECORE_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case ECORE_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       enum ecore_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	enum _ecore_status_t rc;

	switch (mode) {
	case ECORE_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case ECORE_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case ECORE_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 mask_parities)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			   mask_parities, &resp, &param);

	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not ack mask parity request. Old MFW?\n");
		rc = ECORE_INVAL;
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	bytes_left = len;
	offset = 0;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		params.nvm_common.offset = (addr + offset) |
		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
					    FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
			break;
		}
		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
					u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &len;
	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
	    DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
	params.nvm_common.offset = addr;
	params.nvm_rd.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
						  u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

/* rc receives ECORE_INVAL as default parameter because
 * it might not enter the while loop if the len is 0
 */
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	u32 buf_idx, buf_size;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_WR;
	if (cmd == ECORE_PUT_FILE_DATA)
		params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
	else
		params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MCP_DRV_NVM_BUF_LEN);
		params.nvm_common.offset = ((buf_size <<
					     DRV_MB_PARAM_NVM_LEN_SHIFT)
					    | addr) + buf_idx;
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS ||
		    ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
		     (params.nvm_common.resp !=
		      FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

		buf_idx += buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_wr.buf_size = len;
	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
	    DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
	params.nvm_common.offset = addr;
	params.nvm_wr.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
						   u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

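/* Read data from a transceiver (SFP) EEPROM over the MFW I2C interface,
 * in chunks of at most MAX_I2C_TRANSACTION_SIZE bytes.
 */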
enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 bytes_left, bytes_to_copy, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	SET_FIELD(params.nvm_common.offset,
		  DRV_MB_PARAM_TRANSCEIVER_PORT, port);
	SET_FIELD(params.nvm_common.offset,
		  DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
	addr = offset;
	offset = 0;
	bytes_left = len;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		SET_FIELD(params.nvm_common.offset,
			  DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset);
		SET_FIELD(params.nvm_common.offset,
			  DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 port, u32 addr, u32 offset,
					     u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_idx, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	SET_FIELD(params.nvm_common.offset,
		  DRV_MB_PARAM_TRANSCEIVER_PORT, port);
	SET_FIELD(params.nvm_common.offset,
		  DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MAX_I2C_TRANSACTION_SIZE);
		SET_FIELD(params.nvm_common.offset,
			  DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx);
		SET_FIELD(params.nvm_common.offset,
			  DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size);
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		buf_idx += buf_size;
	}

	return ECORE_SUCCESS;
}

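/* Read or write a board GPIO via the MFW GPIO mailbox commands. */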
enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, rsp;

	SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
			   drv_mb_param, &rsp, gpio_val);

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u16 gpio, u16 gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, param, rsp;

	SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
	SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}