2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23 #include "ecore_sp_commands.h"
24 #include "ecore_cxt.h"
26 #define CHIP_MCP_RESP_ITER_US 10
27 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
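/* With the 10 usec polling interval used on real chips, the retry counts
 * below translate to roughly 5 seconds (500,000 * 10 usec) for a regular
 * mailbox command and 500 msec (50,000 * 10 usec) for an MCP reset.
 */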
29 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
30 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
32 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
33 ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
36 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
37 ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
39 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
40 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
41 OFFSETOF(struct public_drv_mb, _field), _val)
43 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
44 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
45 OFFSETOF(struct public_drv_mb, _field))
47 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
48 DRV_ID_PDA_COMP_VER_OFFSET)
50 #define MCP_BYTES_PER_MBIT_OFFSET 17
54 static int loaded_port[MAX_NUM_PORTS] = { 0 };
57 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
59 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
64 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
66 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
68 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
70 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
72 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
73 "port_addr = 0x%x, port_id 0x%02x\n",
74 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
77 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
79 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
84 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
88 if (!p_hwfn->mcp_info->public_base)
91 for (i = 0; i < length; i++) {
92 tmp = ecore_rd(p_hwfn, p_ptt,
93 p_hwfn->mcp_info->mfw_mb_addr +
94 (i << 2) + sizeof(u32));
96 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
97 OSAL_BE32_TO_CPU(tmp);
101 struct ecore_mcp_cmd_elem {
102 osal_list_entry_t list;
103 struct ecore_mcp_mb_params *p_mb_params;
104 u16 expected_seq_num;
108 /* Must be called while cmd_lock is acquired */
109 static struct ecore_mcp_cmd_elem *
110 ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
111 struct ecore_mcp_mb_params *p_mb_params,
112 u16 expected_seq_num)
114 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
116 p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
117 sizeof(*p_cmd_elem));
119 DP_NOTICE(p_hwfn, false,
120 "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
124 p_cmd_elem->p_mb_params = p_mb_params;
125 p_cmd_elem->expected_seq_num = expected_seq_num;
126 OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
131 /* Must be called while cmd_lock is acquired */
132 static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
133 struct ecore_mcp_cmd_elem *p_cmd_elem)
135 OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
136 OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
139 /* Must be called while cmd_lock is acquired */
140 static struct ecore_mcp_cmd_elem *
141 ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
143 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
145 OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
146 struct ecore_mcp_cmd_elem) {
147 if (p_cmd_elem->expected_seq_num == seq_num)
154 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
156 if (p_hwfn->mcp_info) {
157 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
159 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
160 OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
161 &p_hwfn->mcp_info->cmd_list, list,
162 struct ecore_mcp_cmd_elem) {
163 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
165 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
167 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
168 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
169 #ifdef CONFIG_ECORE_LOCK_ALLOC
170 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
171 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
175 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
177 return ECORE_SUCCESS;
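/* Read the shared-memory base (public_base) from the MISC register and derive
 * from it the per-PF driver mailbox address, the MFW mailbox address and
 * length, the current mailbox/pulse sequence numbers, and the MCP history
 * counter used later to detect MCP resets.
 */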
180 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
181 struct ecore_ptt *p_ptt)
183 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
184 u32 drv_mb_offsize, mfw_mb_offsize;
185 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
188 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
189 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
190 p_info->public_base = 0;
195 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
196 if (!p_info->public_base)
199 p_info->public_base |= GRCBASE_MCP;
201 /* Calculate the driver and MFW mailbox address */
202 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
203 SECTION_OFFSIZE_ADDR(p_info->public_base,
205 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
206 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
207 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
208 " mcp_pf_id = 0x%x\n",
209 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
211 /* Set the MFW MB address */
212 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
213 SECTION_OFFSIZE_ADDR(p_info->public_base,
215 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
216 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
217 p_info->mfw_mb_addr);
219 /* Get the current driver mailbox sequence before sending
222 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
223 DRV_MSG_SEQ_NUMBER_MASK;
225 /* Get current FW pulse sequence */
226 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
229 p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
231 return ECORE_SUCCESS;
234 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
235 struct ecore_ptt *p_ptt)
237 struct ecore_mcp_info *p_info;
240 /* Allocate mcp_info structure */
241 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
242 sizeof(*p_hwfn->mcp_info));
243 if (!p_hwfn->mcp_info)
245 p_info = p_hwfn->mcp_info;
247 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
248 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
249 /* Do not free mcp_info here, since public_base indicates that
250 * the MCP is not initialized
252 return ECORE_SUCCESS;
255 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
256 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
257 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
258 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
261 /* Initialize the MFW spinlocks */
262 #ifdef CONFIG_ECORE_LOCK_ALLOC
263 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
264 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
266 OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
267 OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
269 OSAL_LIST_INIT(&p_info->cmd_list);
271 return ECORE_SUCCESS;
274 DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
275 ecore_mcp_free(p_hwfn);
279 static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
280 struct ecore_ptt *p_ptt)
282 u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
284 /* Use MCP history register to check if MCP reset occurred between init
287 if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
288 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
289 "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
290 p_hwfn->mcp_info->mcp_hist, generic_por_0);
292 ecore_load_mcp_offsets(p_hwfn, p_ptt);
293 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
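/* Request an MCP reset by writing DRV_MSG_CODE_MCP_RESET to the driver
 * mailbox, then poll MISCS_REG_GENERIC_POR_0 until its value changes from the
 * pre-reset snapshot or the retry budget is exhausted.
 */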
297 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
298 struct ecore_ptt *p_ptt)
300 u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
301 enum _ecore_status_t rc = ECORE_SUCCESS;
304 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
305 delay = EMUL_MCP_RESP_ITER_US;
308 if (p_hwfn->mcp_info->b_block_cmd) {
309 DP_NOTICE(p_hwfn, false,
310 "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
311 return ECORE_ABORTED;
314 /* Ensure that only a single thread is accessing the mailbox */
315 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
317 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
319 /* Set drv command along with the updated sequence */
320 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
321 seq = ++p_hwfn->mcp_info->drv_mb_seq;
322 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
325 /* Wait for MFW response */
327 /* Give the FW up to 500 msec (50*1000*10usec) */
328 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
329 MISCS_REG_GENERIC_POR_0)) &&
330 (cnt++ < ECORE_MCP_RESET_RETRIES));
332 if (org_mcp_reset_seq !=
333 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
334 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
335 "MCP was reset after %d usec\n", cnt * delay);
337 DP_ERR(p_hwfn, "Failed to reset MCP\n");
341 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
346 /* Must be called while cmd_lock is acquired */
347 static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
349 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
351 /* There is at most one pending command at any given time, and if it
352 * exists - it is placed at the HEAD of the list.
354 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
355 p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
356 struct ecore_mcp_cmd_elem,
358 return !p_cmd_elem->b_is_completed;
364 /* Must be called while cmd_lock is acquired */
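/* Read fw_mb_header, and if its sequence number matches the last command
 * sent, copy the MFW response, param and union data into the matching pending
 * command element and mark it as completed.
 */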
365 static enum _ecore_status_t
366 ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
368 struct ecore_mcp_mb_params *p_mb_params;
369 struct ecore_mcp_cmd_elem *p_cmd_elem;
373 mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
374 seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
376 /* Return if no new non-handled response has been received */
377 if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
380 p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
383 "Failed to find a pending mailbox cmd that expects sequence number %d\n",
385 return ECORE_UNKNOWN_ERROR;
388 p_mb_params = p_cmd_elem->p_mb_params;
390 /* Get the MFW response along with the sequence number */
391 p_mb_params->mcp_resp = mcp_resp;
393 /* Get the MFW param */
394 p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
396 /* Get the union data */
397 if (p_mb_params->p_data_dst != OSAL_NULL &&
398 p_mb_params->data_dst_size) {
399 u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
400 OFFSETOF(struct public_drv_mb,
402 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
403 union_data_addr, p_mb_params->data_dst_size);
406 p_cmd_elem->b_is_completed = true;
408 return ECORE_SUCCESS;
411 /* Must be called while cmd_lock is acquired */
412 static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
413 struct ecore_ptt *p_ptt,
414 struct ecore_mcp_mb_params *p_mb_params,
417 union drv_union_data union_data;
420 /* Set the union data */
421 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
422 OFFSETOF(struct public_drv_mb, union_data);
423 OSAL_MEM_ZERO(&union_data, sizeof(union_data));
424 if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
425 OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
426 p_mb_params->data_src_size);
427 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
430 /* Set the drv param */
431 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
433 /* Set the drv command along with the sequence number */
434 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
436 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
437 "MFW mailbox: command 0x%08x param 0x%08x\n",
438 (p_mb_params->cmd | seq_num), p_mb_params->param);
441 static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
444 p_hwfn->mcp_info->b_block_cmd = block_cmd;
446 DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
447 block_cmd ? "Block" : "Unblock");
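/* Core mailbox flow: wait until no command is pending, post the command with
 * a new sequence number while holding cmd_lock, then poll (releasing the lock
 * between iterations) until the MFW response arrives or the retries expire.
 * On a timeout the mailbox is blocked and ECORE_HW_ERR_MFW_RESP_FAIL is
 * reported.
 */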
450 static enum _ecore_status_t
451 _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
452 struct ecore_mcp_mb_params *p_mb_params,
453 u32 max_retries, u32 delay)
455 struct ecore_mcp_cmd_elem *p_cmd_elem;
458 enum _ecore_status_t rc = ECORE_SUCCESS;
460 /* Wait until the mailbox is non-occupied */
462 /* Exit the loop if there is no pending command, or if the
463 * pending command is completed during this iteration.
464 * The spinlock stays locked until the command is sent.
467 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
469 if (!ecore_mcp_has_pending_cmd(p_hwfn))
472 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
473 if (rc == ECORE_SUCCESS)
475 else if (rc != ECORE_AGAIN)
478 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
480 OSAL_MFW_CMD_PREEMPT(p_hwfn);
481 } while (++cnt < max_retries);
483 if (cnt >= max_retries) {
484 DP_NOTICE(p_hwfn, false,
485 "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
486 p_mb_params->cmd, p_mb_params->param);
490 /* Send the mailbox command */
491 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
492 seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
493 p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
499 __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
500 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
502 /* Wait for the MFW response */
504 /* Exit the loop if the command is already completed, or if the
505 * command is completed during this iteration.
506 * The spinlock stays locked until the list element is removed.
510 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
512 if (p_cmd_elem->b_is_completed)
515 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
516 if (rc == ECORE_SUCCESS)
518 else if (rc != ECORE_AGAIN)
521 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
522 OSAL_MFW_CMD_PREEMPT(p_hwfn);
523 } while (++cnt < max_retries);
525 if (cnt >= max_retries) {
526 DP_NOTICE(p_hwfn, false,
527 "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
528 p_mb_params->cmd, p_mb_params->param);
530 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
531 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
532 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
534 ecore_mcp_cmd_set_blocking(p_hwfn, true);
535 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
539 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
540 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
542 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
543 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
544 p_mb_params->mcp_resp, p_mb_params->mcp_param,
545 (cnt * delay) / 1000, (cnt * delay) % 1000);
547 /* Clear the sequence number from the MFW response */
548 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
550 return ECORE_SUCCESS;
553 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
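/* Wrapper around _ecore_mcp_cmd_and_union() that picks the polling interval
 * (emulation/FPGA vs. real chip), verifies that the MCP is initialized and
 * not blocked, and rejects buffers larger than union drv_union_data.
 */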
557 static enum _ecore_status_t
558 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
559 struct ecore_ptt *p_ptt,
560 struct ecore_mcp_mb_params *p_mb_params)
562 osal_size_t union_data_size = sizeof(union drv_union_data);
563 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
564 u32 delay = CHIP_MCP_RESP_ITER_US;
567 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
568 delay = EMUL_MCP_RESP_ITER_US;
569 /* There is a built-in delay of 100usec in each MFW response read */
570 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
574 /* MCP not initialized */
575 if (!ecore_mcp_is_init(p_hwfn)) {
576 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
580 if (p_mb_params->data_src_size > union_data_size ||
581 p_mb_params->data_dst_size > union_data_size) {
583 "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
584 p_mb_params->data_src_size, p_mb_params->data_dst_size,
589 if (p_hwfn->mcp_info->b_block_cmd) {
590 DP_NOTICE(p_hwfn, false,
591 "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
592 p_mb_params->cmd, p_mb_params->param);
593 return ECORE_ABORTED;
596 return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
600 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
601 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
602 u32 *o_mcp_resp, u32 *o_mcp_param)
604 struct ecore_mcp_mb_params mb_params;
605 enum _ecore_status_t rc;
608 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
609 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
611 loaded_port[p_hwfn->port_id]--;
612 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
615 return ECORE_SUCCESS;
619 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
621 mb_params.param = param;
622 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
623 if (rc != ECORE_SUCCESS)
626 *o_mcp_resp = mb_params.mcp_resp;
627 *o_mcp_param = mb_params.mcp_param;
629 return ECORE_SUCCESS;
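/* Typical caller pattern (illustrative sketch, mirroring ecore_mcp_load_done()
 * further below):
 *
 *	u32 resp = 0, param = 0;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *			   &resp, &param);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *
 * On success, resp holds the FW_MSG_CODE_* response and param the MFW
 * parameter.
 */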
632 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
633 struct ecore_ptt *p_ptt,
638 u32 i_txn_size, u32 *i_buf)
640 struct ecore_mcp_mb_params mb_params;
641 enum _ecore_status_t rc;
643 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
645 mb_params.param = param;
646 mb_params.p_data_src = i_buf;
647 mb_params.data_src_size = (u8)i_txn_size;
648 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
649 if (rc != ECORE_SUCCESS)
652 *o_mcp_resp = mb_params.mcp_resp;
653 *o_mcp_param = mb_params.mcp_param;
655 return ECORE_SUCCESS;
658 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
659 struct ecore_ptt *p_ptt,
664 u32 *o_txn_size, u32 *o_buf)
666 struct ecore_mcp_mb_params mb_params;
667 u8 raw_data[MCP_DRV_NVM_BUF_LEN];
668 enum _ecore_status_t rc;
670 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
672 mb_params.param = param;
673 mb_params.p_data_dst = raw_data;
675 /* Use the maximal value since the actual one is part of the response */
676 mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
678 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
679 if (rc != ECORE_SUCCESS)
682 *o_mcp_resp = mb_params.mcp_resp;
683 *o_mcp_param = mb_params.mcp_param;
685 *o_txn_size = *o_mcp_param;
687 OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
689 return ECORE_SUCCESS;
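/* Emulation-only workaround: with no MFW present, fabricate the load response
 * (engine/port/function) based on static per-port load counters.
 */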
693 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
696 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
699 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
700 else if (!loaded_port[p_hwfn->port_id])
701 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
703 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
705 /* On CMT, always tell that it's engine */
706 if (ECORE_IS_CMT(p_hwfn->p_dev))
707 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
709 *p_load_code = load_phase;
711 loaded_port[p_hwfn->port_id]++;
713 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
714 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
715 *p_load_code, loaded, p_hwfn->port_id,
716 loaded_port[p_hwfn->port_id]);
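/* A force load is allowed when the override policy says ALWAYS, never when it
 * says NEVER, and otherwise only for an OS driver replacing a PREBOOT driver
 * or a KDUMP driver replacing an OS driver.
 */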
721 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
722 enum ecore_override_force_load override_force_load)
724 bool can_force_load = false;
726 switch (override_force_load) {
727 case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
728 can_force_load = true;
730 case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
731 can_force_load = false;
734 can_force_load = (drv_role == DRV_ROLE_OS &&
735 exist_drv_role == DRV_ROLE_PREBOOT) ||
736 (drv_role == DRV_ROLE_KDUMP &&
737 exist_drv_role == DRV_ROLE_OS);
741 return can_force_load;
744 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
745 struct ecore_ptt *p_ptt)
747 u32 resp = 0, param = 0;
748 enum _ecore_status_t rc;
750 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
752 if (rc != ECORE_SUCCESS)
753 DP_NOTICE(p_hwfn, false,
754 "Failed to send cancel load request, rc = %d\n", rc);
759 #define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
760 #define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
761 #define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
762 #define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
763 #define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
764 #define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
765 #define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
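/* Build a bitmap of the protocol support compiled into this ecore build; it
 * is reported to the MFW as drv_ver_1 in the load request.
 */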
767 static u32 ecore_get_config_bitmap(void)
769 u32 config_bitmap = 0x0;
771 #ifdef CONFIG_ECORE_L2
772 config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
774 #ifdef CONFIG_ECORE_SRIOV
775 config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
777 #ifdef CONFIG_ECORE_ROCE
778 config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
780 #ifdef CONFIG_ECORE_IWARP
781 config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
783 #ifdef CONFIG_ECORE_FCOE
784 config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
786 #ifdef CONFIG_ECORE_ISCSI
787 config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
789 #ifdef CONFIG_ECORE_LL2
790 config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
793 return config_bitmap;
796 struct ecore_load_req_in_params {
798 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
799 #define ECORE_LOAD_REQ_HSI_VER_1 1
806 bool avoid_eng_reset;
809 struct ecore_load_req_out_params {
819 static enum _ecore_status_t
820 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
821 struct ecore_load_req_in_params *p_in_params,
822 struct ecore_load_req_out_params *p_out_params)
824 struct ecore_mcp_mb_params mb_params;
825 struct load_req_stc load_req;
826 struct load_rsp_stc load_rsp;
828 enum _ecore_status_t rc;
830 OSAL_MEM_ZERO(&load_req, sizeof(load_req));
831 load_req.drv_ver_0 = p_in_params->drv_ver_0;
832 load_req.drv_ver_1 = p_in_params->drv_ver_1;
833 load_req.fw_ver = p_in_params->fw_ver;
834 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
835 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
836 p_in_params->timeout_val);
837 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
838 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
839 p_in_params->avoid_eng_reset);
841 hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
842 DRV_ID_MCP_HSI_VER_CURRENT :
843 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
845 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
846 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
847 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
848 mb_params.p_data_src = &load_req;
849 mb_params.data_src_size = sizeof(load_req);
850 mb_params.p_data_dst = &load_rsp;
851 mb_params.data_dst_size = sizeof(load_rsp);
853 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
854 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
856 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
857 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
858 GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
859 GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
861 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
862 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
863 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
864 load_req.drv_ver_0, load_req.drv_ver_1,
865 load_req.fw_ver, load_req.misc0,
866 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
867 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
868 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
869 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
871 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
872 if (rc != ECORE_SUCCESS) {
873 DP_NOTICE(p_hwfn, false,
874 "Failed to send load request, rc = %d\n", rc);
878 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
879 "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
880 p_out_params->load_code = mb_params.mcp_resp;
882 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
883 p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
884 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
885 "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
886 load_rsp.drv_ver_0, load_rsp.drv_ver_1,
887 load_rsp.fw_ver, load_rsp.misc0,
888 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
889 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
890 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
892 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
893 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
894 p_out_params->exist_fw_ver = load_rsp.fw_ver;
895 p_out_params->exist_drv_role =
896 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
897 p_out_params->mfw_hsi_ver =
898 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
899 p_out_params->drv_exists =
900 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
901 LOAD_RSP_FLAGS0_DRV_EXISTS;
904 return ECORE_SUCCESS;
907 static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
911 case ECORE_DRV_ROLE_OS:
912 *p_mfw_drv_role = DRV_ROLE_OS;
914 case ECORE_DRV_ROLE_KDUMP:
915 *p_mfw_drv_role = DRV_ROLE_KDUMP;
920 enum ecore_load_req_force {
921 ECORE_LOAD_REQ_FORCE_NONE,
922 ECORE_LOAD_REQ_FORCE_PF,
923 ECORE_LOAD_REQ_FORCE_ALL,
926 static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
930 case ECORE_LOAD_REQ_FORCE_NONE:
931 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
933 case ECORE_LOAD_REQ_FORCE_PF:
934 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
936 case ECORE_LOAD_REQ_FORCE_ALL:
937 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
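/* Send a LOAD_REQ to the MFW and handle its possible responses: retry with
 * HSI version 1 if the MFW only supports the old interface, retry with a
 * force command if a force load is required and allowed (otherwise cancel the
 * request), and finally accept an ENGINE/PORT/FUNCTION load code.
 */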
942 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
943 struct ecore_ptt *p_ptt,
944 struct ecore_load_req_params *p_params)
946 struct ecore_load_req_out_params out_params;
947 struct ecore_load_req_in_params in_params;
948 u8 mfw_drv_role = 0, mfw_force_cmd;
949 enum _ecore_status_t rc;
952 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
953 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
954 return ECORE_SUCCESS;
958 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
959 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
960 in_params.drv_ver_0 = ECORE_VERSION;
961 in_params.drv_ver_1 = ecore_get_config_bitmap();
962 in_params.fw_ver = STORM_FW_VERSION;
963 ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
964 in_params.drv_role = mfw_drv_role;
965 in_params.timeout_val = p_params->timeout_val;
966 ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
967 in_params.force_cmd = mfw_force_cmd;
968 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
970 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
971 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
972 if (rc != ECORE_SUCCESS)
975 /* First handle cases where another load request should/might be sent:
976 * - MFW expects the old interface [HSI version = 1]
977 * - MFW responds that a force load request is required
979 if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
981 "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
983 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
984 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
985 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
987 if (rc != ECORE_SUCCESS)
989 } else if (out_params.load_code ==
990 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
991 if (ecore_mcp_can_force_load(in_params.drv_role,
992 out_params.exist_drv_role,
993 p_params->override_force_load)) {
995 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
996 in_params.drv_role, in_params.fw_ver,
997 in_params.drv_ver_0, in_params.drv_ver_1,
998 out_params.exist_drv_role,
999 out_params.exist_fw_ver,
1000 out_params.exist_drv_ver_0,
1001 out_params.exist_drv_ver_1);
1003 ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
1006 in_params.force_cmd = mfw_force_cmd;
1007 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1008 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1010 if (rc != ECORE_SUCCESS)
1013 DP_NOTICE(p_hwfn, false,
1014 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1015 in_params.drv_role, in_params.fw_ver,
1016 in_params.drv_ver_0, in_params.drv_ver_1,
1017 out_params.exist_drv_role,
1018 out_params.exist_fw_ver,
1019 out_params.exist_drv_ver_0,
1020 out_params.exist_drv_ver_1);
1022 ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
1027 /* Now handle the other types of responses.
1028 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1029 * expected here after the additional revised load requests were sent.
1031 switch (out_params.load_code) {
1032 case FW_MSG_CODE_DRV_LOAD_ENGINE:
1033 case FW_MSG_CODE_DRV_LOAD_PORT:
1034 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1035 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
1036 out_params.drv_exists) {
1037 /* The role and fw/driver version match, but the PF is
1038 * already loaded and has not been unloaded gracefully.
1039 * This is unexpected since a quasi-FLR request was
1040 * previously sent as part of ecore_hw_prepare().
1042 DP_NOTICE(p_hwfn, false,
1043 "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
1048 DP_NOTICE(p_hwfn, false,
1049 "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1050 out_params.load_code);
1054 p_params->load_code = out_params.load_code;
1056 return ECORE_SUCCESS;
1059 enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
1060 struct ecore_ptt *p_ptt)
1062 u32 resp = 0, param = 0;
1063 enum _ecore_status_t rc;
1065 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1067 if (rc != ECORE_SUCCESS) {
1068 DP_NOTICE(p_hwfn, false,
1069 "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1073 /* Check if there is a DID mismatch between nvm-cfg/efuse */
1074 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1075 DP_NOTICE(p_hwfn, false,
1076 "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1078 return ECORE_SUCCESS;
1081 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
1082 struct ecore_ptt *p_ptt)
1084 u32 wol_param, mcp_resp, mcp_param;
1087 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1089 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1090 &mcp_resp, &mcp_param);
1093 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1094 struct ecore_ptt *p_ptt)
1096 struct ecore_mcp_mb_params mb_params;
1097 struct mcp_mac wol_mac;
1099 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1100 mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1102 return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1105 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
1106 struct ecore_ptt *p_ptt)
1108 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1110 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1111 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1112 ECORE_PATH_ID(p_hwfn));
1113 u32 disabled_vfs[VF_MAX_STATIC / 32];
1116 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1117 "Reading Disabled VF information from [offset %08x],"
1118 " path_addr %08x\n",
1119 mfw_path_offsize, path_addr);
1121 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1122 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
1124 OFFSETOF(struct public_path,
1127 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1128 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1129 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1132 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1133 OSAL_VF_FLR_UPDATE(p_hwfn);
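/* Acknowledge the handled VF FLRs to the MFW via VF_DISABLED_DONE, passing
 * the bitmap of VFs to ack, and then (temporarily, until the MFW does it)
 * clear the corresponding drv_ack_vf_disabled bits in shmem.
 */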
1136 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
1137 struct ecore_ptt *p_ptt,
1140 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1142 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1143 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1145 struct ecore_mcp_mb_params mb_params;
1146 enum _ecore_status_t rc;
1149 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1150 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1151 "Acking VFs [%08x,...,%08x] - %08x\n",
1152 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1154 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1155 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1156 mb_params.p_data_src = vfs_to_ack;
1157 mb_params.data_src_size = VF_MAX_STATIC / 8;
1158 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
1160 if (rc != ECORE_SUCCESS) {
1161 DP_NOTICE(p_hwfn, false,
1162 "Failed to pass ACK for VF flr to MFW\n");
1163 return ECORE_TIMEOUT;
1166 /* TMP - clear the ACK bits; should be done by MFW */
1167 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1168 ecore_wr(p_hwfn, p_ptt,
1170 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
1171 i * sizeof(u32), 0);
1176 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1177 struct ecore_ptt *p_ptt)
1179 u32 transceiver_state;
1181 transceiver_state = ecore_rd(p_hwfn, p_ptt,
1182 p_hwfn->mcp_info->port_addr +
1183 OFFSETOF(struct public_port,
1186 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1187 "Received transceiver state update [0x%08x] from mfw"
1189 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1190 OFFSETOF(struct public_port,
1191 transceiver_data)));
1193 transceiver_state = GET_MFW_FIELD(transceiver_state,
1194 ETH_TRANSCEIVER_STATE);
1196 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1197 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1199 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1201 OSAL_TRANSCEIVER_UPDATE(p_hwfn);
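/* Parse the port's eee_status shmem field into the link state: the EEE active
 * flag plus the local and link-partner advertised capabilities (1G/10G).
 */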
1204 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1205 struct ecore_ptt *p_ptt,
1206 struct ecore_mcp_link_state *p_link)
1208 u32 eee_status, val;
1210 p_link->eee_adv_caps = 0;
1211 p_link->eee_lp_adv_caps = 0;
1212 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1213 OFFSETOF(struct public_port, eee_status));
1214 p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1215 val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1216 if (val & EEE_1G_ADV)
1217 p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1218 if (val & EEE_10G_ADV)
1219 p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1220 val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1221 if (val & EEE_1G_ADV)
1222 p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1223 if (val & EEE_10G_ADV)
1224 p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1227 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1228 struct ecore_ptt *p_ptt,
1229 struct public_func *p_data,
1232 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1234 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1235 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1238 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1240 size = OSAL_MIN_T(u32, sizeof(*p_data),
1241 SECTION_SIZE(mfw_path_offsize));
1242 for (i = 0; i < size / sizeof(u32); i++)
1243 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1244 func_addr + (i << 2));
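/* Refresh the cached link state from the port's link_status shmem field (or
 * from the per-function virtual-link bit when the MFW supports VLINK),
 * re-apply the min/max bandwidth configuration, and notify the upper layer.
 */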
1249 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
1250 struct ecore_ptt *p_ptt,
1253 struct ecore_mcp_link_state *p_link;
1257 /* Prevent SW/attentions from doing this at the same time */
1258 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
1260 p_link = &p_hwfn->mcp_info->link_output;
1261 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1263 status = ecore_rd(p_hwfn, p_ptt,
1264 p_hwfn->mcp_info->port_addr +
1265 OFFSETOF(struct public_port, link_status));
1266 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
1267 "Received link update [0x%08x] from mfw"
1269 status, (u32)(p_hwfn->mcp_info->port_addr +
1270 OFFSETOF(struct public_port,
1273 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1274 "Resetting link indications\n");
1278 if (p_hwfn->b_drv_link_init) {
1279 /* Link indication with modern MFW arrives as per-PF
1282 if (p_hwfn->mcp_info->capabilities &
1283 FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1284 struct public_func shmem_info;
1286 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1288 p_link->link_up = !!(shmem_info.status &
1289 FUNC_STATUS_VIRTUAL_LINK_UP);
1291 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1294 p_link->link_up = false;
1297 p_link->full_duplex = true;
1298 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1299 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1300 p_link->speed = 100000;
1302 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1303 p_link->speed = 50000;
1305 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1306 p_link->speed = 40000;
1308 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1309 p_link->speed = 25000;
1311 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1312 p_link->speed = 20000;
1314 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1315 p_link->speed = 10000;
1317 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1318 p_link->full_duplex = false;
1320 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1321 p_link->speed = 1000;
1327 /* We never store the total line speed, since p_link->speed is
1328 * changed again according to bandwidth allocation.
1330 if (p_link->link_up && p_link->speed)
1331 p_link->line_speed = p_link->speed;
1333 p_link->line_speed = 0;
1335 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1336 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1338 /* Max bandwidth configuration */
1339 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
1342 /* Min bandwidth configuration */
1343 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
1345 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
1346 p_link->min_pf_rate);
1348 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1349 p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1350 p_link->parallel_detection = !!(status &
1351 LINK_STATUS_PARALLEL_DETECTION_USED);
1352 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1354 p_link->partner_adv_speed |=
1355 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1356 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1357 p_link->partner_adv_speed |=
1358 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1359 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1360 p_link->partner_adv_speed |=
1361 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1362 ECORE_LINK_PARTNER_SPEED_10G : 0;
1363 p_link->partner_adv_speed |=
1364 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1365 ECORE_LINK_PARTNER_SPEED_20G : 0;
1366 p_link->partner_adv_speed |=
1367 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1368 ECORE_LINK_PARTNER_SPEED_25G : 0;
1369 p_link->partner_adv_speed |=
1370 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1371 ECORE_LINK_PARTNER_SPEED_40G : 0;
1372 p_link->partner_adv_speed |=
1373 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1374 ECORE_LINK_PARTNER_SPEED_50G : 0;
1375 p_link->partner_adv_speed |=
1376 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1377 ECORE_LINK_PARTNER_SPEED_100G : 0;
1379 p_link->partner_tx_flow_ctrl_en =
1380 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1381 p_link->partner_rx_flow_ctrl_en =
1382 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1384 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1385 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1386 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1388 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1389 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1391 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1392 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1395 p_link->partner_adv_pause = 0;
1398 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1400 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1401 ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1403 OSAL_LINK_UPDATE(p_hwfn, p_ptt);
1405 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
1408 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1409 struct ecore_ptt *p_ptt, bool b_up)
1411 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1412 struct ecore_mcp_mb_params mb_params;
1413 struct eth_phy_cfg phy_cfg;
1414 enum _ecore_status_t rc = ECORE_SUCCESS;
1418 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1419 return ECORE_SUCCESS;
1422 /* Set the shmem configuration according to params */
1423 OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1424 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1425 if (!params->speed.autoneg)
1426 phy_cfg.speed = params->speed.forced_speed;
1427 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1428 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1429 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1430 phy_cfg.adv_speed = params->speed.advertised_speeds;
1431 phy_cfg.loopback_mode = params->loopback_mode;
1433 /* There are MFWs that share this capability regardless of whether
1434 * this is feasible or not. And given that at the very least adv_caps
1435 * would be set internally by ecore, we want to make sure LFA would
1438 if ((p_hwfn->mcp_info->capabilities &
1439 FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
1440 params->eee.enable) {
1441 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1442 if (params->eee.tx_lpi_enable)
1443 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1444 if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
1445 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1446 if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
1447 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1448 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1449 EEE_TX_TIMER_USEC_OFFSET) &
1450 EEE_TX_TIMER_USEC_MASK;
1453 p_hwfn->b_drv_link_init = b_up;
1456 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1457 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
1458 phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1459 phy_cfg.loopback_mode);
1461 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1463 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1464 mb_params.cmd = cmd;
1465 mb_params.p_data_src = &phy_cfg;
1466 mb_params.data_src_size = sizeof(phy_cfg);
1467 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1469 /* if mcp fails to respond we must abort */
1470 if (rc != ECORE_SUCCESS) {
1471 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1475 /* Mimic link-change attention, done for several reasons:
1476 * - On reset, there's no guarantee MFW would trigger
1478 * - On initialization, older MFWs might not indicate link change
1479 * during LFA, so we'll never get an UP indication.
1481 ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1483 return ECORE_SUCCESS;
1486 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1487 struct ecore_ptt *p_ptt)
1489 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1491 /* TODO - Add support for VFs */
1492 if (IS_VF(p_hwfn->p_dev))
1495 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1497 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1498 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1500 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1502 OFFSETOF(struct public_path, process_kill)) &
1503 PROCESS_KILL_COUNTER_MASK;
1505 return proc_kill_cnt;
1508 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1509 struct ecore_ptt *p_ptt)
1511 struct ecore_dev *p_dev = p_hwfn->p_dev;
1514 /* Prevent possible attentions/interrupts during the recovery handling
1515 * and until its load phase, during which they will be re-enabled.
1517 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1519 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1521 /* The following operations should be done once, and thus in CMT mode
1522 * are carried out by only the first HW function.
1524 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1527 if (p_dev->recov_in_prog) {
1528 DP_NOTICE(p_hwfn, false,
1529 "Ignoring the indication since a recovery"
1530 " process is already in progress\n");
1534 p_dev->recov_in_prog = true;
1536 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1537 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1539 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1542 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1543 struct ecore_ptt *p_ptt,
1544 enum MFW_DRV_MSG_TYPE type)
1546 enum ecore_mcp_protocol_type stats_type;
1547 union ecore_mcp_protocol_stats stats;
1548 struct ecore_mcp_mb_params mb_params;
1550 enum _ecore_status_t rc;
1553 case MFW_DRV_MSG_GET_LAN_STATS:
1554 stats_type = ECORE_MCP_LAN_STATS;
1555 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1558 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1559 "Invalid protocol type %d\n", type);
1563 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1565 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1566 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1567 mb_params.param = hsi_param;
1568 mb_params.p_data_src = &stats;
1569 mb_params.data_src_size = sizeof(stats);
1570 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1571 if (rc != ECORE_SUCCESS)
1572 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
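/* Extract the min/max bandwidth percentages of this PF from the function
 * configuration in shmem, clamping out-of-range values to [1, 100].
 */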
1575 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1576 struct public_func *p_shmem_info)
1578 struct ecore_mcp_function_info *p_info;
1580 p_info = &p_hwfn->mcp_info->func_info;
1582 /* TODO - bandwidth min/max should have valid values of 1-100,
1583 * as well as some indication that the feature is disabled.
1584 * Until MFW/qlediag enforce those limitations, assume there is always a
1585 * limit, and correct the value to min `1' and max `100' if it isn't in
1588 p_info->bandwidth_min = (p_shmem_info->config &
1589 FUNC_MF_CFG_MIN_BW_MASK) >>
1590 FUNC_MF_CFG_MIN_BW_OFFSET;
1591 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1593 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1594 p_info->bandwidth_min);
1595 p_info->bandwidth_min = 1;
1598 p_info->bandwidth_max = (p_shmem_info->config &
1599 FUNC_MF_CFG_MAX_BW_MASK) >>
1600 FUNC_MF_CFG_MAX_BW_OFFSET;
1601 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1603 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1604 p_info->bandwidth_max);
1605 p_info->bandwidth_max = 100;
1610 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1612 struct ecore_mcp_function_info *p_info;
1613 struct public_func shmem_info;
1614 u32 resp = 0, param = 0;
1616 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1618 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1620 p_info = &p_hwfn->mcp_info->func_info;
1622 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1624 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1626 /* Acknowledge the MFW */
1627 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1631 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
1633 /* A single notification should be sent to upper driver in CMT mode */
1634 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1637 DP_NOTICE(p_hwfn, false,
1638 "Fan failure was detected on the network interface card"
1639 " and it's going to be shut down.\n");
1641 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
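/* Generic wrapper for the MDUMP_CMD mailbox command: the mdump sub-command is
 * passed as the mailbox param, optional request/response buffers are carried
 * in the union data, and unsupported sub-commands are reported.
 */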
1644 struct ecore_mdump_cmd_params {
1653 static enum _ecore_status_t
1654 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1655 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1657 struct ecore_mcp_mb_params mb_params;
1658 enum _ecore_status_t rc;
1660 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1661 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1662 mb_params.param = p_mdump_cmd_params->cmd;
1663 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1664 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1665 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1666 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1667 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1668 if (rc != ECORE_SUCCESS)
1671 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1673 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1675 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1676 p_mdump_cmd_params->cmd);
1678 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1680 "The mdump command is not supported by the MFW\n");
1687 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1688 struct ecore_ptt *p_ptt)
1690 struct ecore_mdump_cmd_params mdump_cmd_params;
1692 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1693 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1695 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1698 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1699 struct ecore_ptt *p_ptt,
1702 struct ecore_mdump_cmd_params mdump_cmd_params;
1704 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1705 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1706 mdump_cmd_params.p_data_src = &epoch;
1707 mdump_cmd_params.data_src_size = sizeof(epoch);
1709 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1712 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1713 struct ecore_ptt *p_ptt)
1715 struct ecore_mdump_cmd_params mdump_cmd_params;
1717 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1718 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1720 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1723 static enum _ecore_status_t
1724 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1725 struct mdump_config_stc *p_mdump_config)
1727 struct ecore_mdump_cmd_params mdump_cmd_params;
1728 enum _ecore_status_t rc;
1730 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1731 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1732 mdump_cmd_params.p_data_dst = p_mdump_config;
1733 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1735 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1736 if (rc != ECORE_SUCCESS)
1739 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1741 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1742 mdump_cmd_params.mcp_resp);
1743 rc = ECORE_UNKNOWN_ERROR;
1749 enum _ecore_status_t
1750 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1751 struct ecore_mdump_info *p_mdump_info)
1753 u32 addr, global_offsize, global_addr;
1754 struct mdump_config_stc mdump_config;
1755 enum _ecore_status_t rc;
1757 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1759 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1761 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1762 global_addr = SECTION_ADDR(global_offsize, 0);
1763 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1765 OFFSETOF(struct public_global,
1768 if (p_mdump_info->reason) {
1769 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1770 if (rc != ECORE_SUCCESS)
1773 p_mdump_info->version = mdump_config.version;
1774 p_mdump_info->config = mdump_config.config;
1775 p_mdump_info->epoch = mdump_config.epoc;
1776 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1777 p_mdump_info->valid_logs = mdump_config.valid_logs;
1779 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1780 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1781 p_mdump_info->reason, p_mdump_info->version,
1782 p_mdump_info->config, p_mdump_info->epoch,
1783 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1785 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1786 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1789 return ECORE_SUCCESS;
1792 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1793 struct ecore_ptt *p_ptt)
1795 struct ecore_mdump_cmd_params mdump_cmd_params;
1797 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1798 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1800 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1803 enum _ecore_status_t
1804 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1805 struct ecore_mdump_retain_data *p_mdump_retain)
1807 struct ecore_mdump_cmd_params mdump_cmd_params;
1808 struct mdump_retain_data_stc mfw_mdump_retain;
1809 enum _ecore_status_t rc;
1811 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1812 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1813 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1814 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1816 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1817 if (rc != ECORE_SUCCESS)
1820 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1822 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1823 mdump_cmd_params.mcp_resp);
1824 return ECORE_UNKNOWN_ERROR;
1827 p_mdump_retain->valid = mfw_mdump_retain.valid;
1828 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1829 p_mdump_retain->pf = mfw_mdump_retain.pf;
1830 p_mdump_retain->status = mfw_mdump_retain.status;
1832 return ECORE_SUCCESS;
1835 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1836 struct ecore_ptt *p_ptt)
1838 struct ecore_mdump_cmd_params mdump_cmd_params;
1840 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1841 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1843 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1846 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1847 struct ecore_ptt *p_ptt)
1849 struct ecore_mdump_retain_data mdump_retain;
1850 enum _ecore_status_t rc;
1852 /* In CMT mode - no need for more than a single acknowledgment to the
1853 * MFW, and no more than a single notification to the upper driver.
1855 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1858 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1859 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1860 DP_NOTICE(p_hwfn, false,
1861 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1862 mdump_retain.epoch, mdump_retain.pf,
1863 mdump_retain.status);
1865 DP_NOTICE(p_hwfn, false,
1866 "The MFW notified that a critical error occurred in the device\n");
1869 if (p_hwfn->p_dev->allow_mdump) {
1870 DP_NOTICE(p_hwfn, false,
1871 "Not acknowledging the notification to allow the MFW crash dump\n");
1875 DP_NOTICE(p_hwfn, false,
1876 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1877 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1878 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
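/* Read the UFP OEM configuration from shmem: the port's scheduling mode (ETS
 * or vNIC BW) and the function's TC and host priority control type.
 */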
1882 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1884 struct public_func shmem_info;
1887 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
1890 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1891 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1892 OFFSETOF(struct public_port, oem_cfg_port));
1893 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
1894 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1895 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
1898 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
1899 if (val == OEM_CFG_SCHED_TYPE_ETS)
1900 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
1901 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
1902 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
1904 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
1907 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1909 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
1910 p_hwfn->ufp_info.tc = (u8)val;
1911 val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
1912 OEM_CFG_FUNC_HOST_PRI_CTRL);
1913 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
1914 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
1915 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
1916 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
1918 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
1921 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
1922 "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
1923 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1924 p_hwfn->ufp_info.pri_type);
1927 static enum _ecore_status_t
1928 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1930 ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
1932 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
1933 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1934 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
1936 ecore_qm_reconf(p_hwfn, p_ptt);
1938 /* Merge UFP TC with the dcbx TC data */
1939 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1940 ECORE_DCBX_OPERATIONAL_MIB);
1943 /* update storm FW with negotiation results */
1944 ecore_sp_pf_update_ufp(p_hwfn);
1946 return ECORE_SUCCESS;
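/* Main MFW async message handler: read the MFW mailbox, compare it against
 * the shadow copy, dispatch a handler for each changed message, ack all
 * messages back to the MFW (in big-endian) and update the shadow.
 */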
1949 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1950 struct ecore_ptt *p_ptt)
1952 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1953 enum _ecore_status_t rc = ECORE_SUCCESS;
1957 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1959 /* Read Messages from MFW */
1960 ecore_mcp_read_mb(p_hwfn, p_ptt);
1962 /* Compare current messages to old ones */
1963 for (i = 0; i < info->mfw_mb_length; i++) {
1964 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1969 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1970 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1971 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1974 case MFW_DRV_MSG_LINK_CHANGE:
1975 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1977 case MFW_DRV_MSG_VF_DISABLED:
1978 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1980 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1981 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1982 ECORE_DCBX_REMOTE_LLDP_MIB);
1984 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1985 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1986 ECORE_DCBX_REMOTE_MIB);
1988 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1989 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1990 ECORE_DCBX_OPERATIONAL_MIB);
1991 /* clear the user-config cache */
1992 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
1993 sizeof(struct ecore_dcbx_set));
1995 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
1996 ecore_lldp_mib_update_event(p_hwfn, p_ptt);
1998 case MFW_DRV_MSG_OEM_CFG_UPDATE:
1999 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
2001 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2002 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2004 case MFW_DRV_MSG_ERROR_RECOVERY:
2005 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2007 case MFW_DRV_MSG_GET_LAN_STATS:
2008 case MFW_DRV_MSG_GET_FCOE_STATS:
2009 case MFW_DRV_MSG_GET_ISCSI_STATS:
2010 case MFW_DRV_MSG_GET_RDMA_STATS:
2011 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2013 case MFW_DRV_MSG_BW_UPDATE:
2014 ecore_mcp_update_bw(p_hwfn, p_ptt);
2016 case MFW_DRV_MSG_FAILURE_DETECTED:
2017 ecore_mcp_handle_fan_failure(p_hwfn);
2019 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2020 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2023 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2028 /* ACK everything */
2029 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2030 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2032 /* The MFW expects the answer in BE, so we force the write in that format */
2033 ecore_wr(p_hwfn, p_ptt,
2034 info->mfw_mb_addr + sizeof(u32) +
2035 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2036 sizeof(u32) + i * sizeof(u32), val);
2040 DP_NOTICE(p_hwfn, false,
2041 "Received an MFW message indication but no"
2046 /* Copy the new mfw messages into the shadow */
2047 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
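/* Note on the ACK address arithmetic above (a reading of the code, not a
 * documented layout): the MFW mailbox in shmem starts with a length dword,
 * followed by MFW_DRV_MSG_MAX_DWORDS(length) dwords holding the current
 * messages, followed by the same number of dwords used for the driver's
 * acknowledgments - hence mfw_mb_addr + sizeof(u32) +
 * MFW_DRV_MSG_MAX_DWORDS(len) * sizeof(u32) + i * sizeof(u32) for ACK i.
 */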
2052 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2053 struct ecore_ptt *p_ptt,
2055 u32 *p_running_bundle_id)
2060 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2061 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2062 return ECORE_SUCCESS;
2066 if (IS_VF(p_hwfn->p_dev)) {
2067 if (p_hwfn->vf_iov_info) {
2068 struct pfvf_acquire_resp_tlv *p_resp;
2070 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2071 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2072 return ECORE_SUCCESS;
2074 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2075 "VF requested MFW version prior to ACQUIRE\n");
2080 global_offsize = ecore_rd(p_hwfn, p_ptt,
2081 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
2085 ecore_rd(p_hwfn, p_ptt,
2086 SECTION_ADDR(global_offsize,
2087 0) + OFFSETOF(struct public_global, mfw_ver));
2089 if (p_running_bundle_id != OSAL_NULL) {
2090 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2091 SECTION_ADDR(global_offsize,
2093 OFFSETOF(struct public_global,
2094 running_bundle_id));
2097 return ECORE_SUCCESS;
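/* Illustrative decode of the 32-bit value returned in *p_mfw_ver. The
 * byte-per-field layout (major.minor.revision.engineering) is an assumption
 * based on how callers commonly format the MFW version, not something this
 * function guarantees:
 *
 *	u8 mfw_major = (u8)(mfw_ver >> 24);
 *	u8 mfw_minor = (u8)(mfw_ver >> 16);
 *	u8 mfw_rev   = (u8)(mfw_ver >> 8);
 *	u8 mfw_eng   = (u8)mfw_ver;
 */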
2100 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2101 struct ecore_ptt *p_ptt,
2105 /* TODO - Add support for VFs */
2106 if (IS_VF(p_hwfn->p_dev))
2109 if (!ecore_mcp_is_init(p_hwfn)) {
2110 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
2115 *p_media_type = MEDIA_UNSPECIFIED;
2118 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2119 p_hwfn->mcp_info->port_addr +
2120 OFFSETOF(struct public_port,
2124 return ECORE_SUCCESS;
2128 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2130 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2131 enum ecore_pci_personality *p_proto)
2133 *p_proto = ECORE_PCI_ETH;
2135 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2136 "According to Legacy capabilities, L2 personality is %08x\n",
2141 static enum _ecore_status_t
2142 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2143 struct ecore_ptt *p_ptt,
2144 enum ecore_pci_personality *p_proto)
2146 u32 resp = 0, param = 0;
2147 enum _ecore_status_t rc;
2149 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2150 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2151 (u32)*p_proto, resp, param);
2152 return ECORE_SUCCESS;
2155 static enum _ecore_status_t
2156 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2157 struct public_func *p_info,
2158 struct ecore_ptt *p_ptt,
2159 enum ecore_pci_personality *p_proto)
2161 enum _ecore_status_t rc = ECORE_SUCCESS;
2163 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2164 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2165 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) != ECORE_SUCCESS)
2167 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2176 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2177 struct ecore_ptt *p_ptt)
2179 struct ecore_mcp_function_info *info;
2180 struct public_func shmem_info;
2182 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2183 info = &p_hwfn->mcp_info->func_info;
2185 info->pause_on_host = (shmem_info.config &
2186 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2188 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2190 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2191 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2195 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2197 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2198 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2199 info->mac[1] = (u8)(shmem_info.mac_upper);
2200 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2201 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2202 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2203 info->mac[5] = (u8)(shmem_info.mac_lower);
2205 /* TODO - are there protocols for which there's no MAC? */
2206 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2209 /* TODO - are these calculations true for BE machine? */
2210 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2211 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2212 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2213 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2215 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2217 info->mtu = (u16)shmem_info.mtu_size;
2222 info->mtu = (u16)shmem_info.mtu_size;
2224 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2225 "Read configuration from shmem: pause_on_host %02x"
2226 " protocol %02x BW [%02x - %02x]"
2227 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
2228 " node %lx ovlan %04x\n",
2229 info->pause_on_host, info->protocol,
2230 info->bandwidth_min, info->bandwidth_max,
2231 info->mac[0], info->mac[1], info->mac[2],
2232 info->mac[3], info->mac[4], info->mac[5],
2233 (unsigned long)info->wwn_port,
2234 (unsigned long)info->wwn_node, info->ovlan);
2236 return ECORE_SUCCESS;
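/* Worked example for the MAC assembly above (hypothetical shmem values):
 * with shmem_info.mac_upper = 0x00000002 and shmem_info.mac_lower =
 * 0xa1b2c3d4, the resulting address is 00:02:a1:b2:c3:d4 - the upper word
 * carries the two most significant bytes and the lower word the remaining
 * four.
 */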
2239 struct ecore_mcp_link_params
2240 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2242 if (!p_hwfn || !p_hwfn->mcp_info)
2244 return &p_hwfn->mcp_info->link_input;
2247 struct ecore_mcp_link_state
2248 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2250 if (!p_hwfn || !p_hwfn->mcp_info)
2254 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2255 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2256 p_hwfn->mcp_info->link_output.link_up = true;
2260 return &p_hwfn->mcp_info->link_output;
2263 struct ecore_mcp_link_capabilities
2264 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2266 if (!p_hwfn || !p_hwfn->mcp_info)
2268 return &p_hwfn->mcp_info->link_capabilities;
2271 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2272 struct ecore_ptt *p_ptt)
2274 u32 resp = 0, param = 0;
2275 enum _ecore_status_t rc;
2277 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2278 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2280 /* Wait for the drain to complete before returning */
2286 const struct ecore_mcp_function_info
2287 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2289 if (!p_hwfn || !p_hwfn->mcp_info)
2291 return &p_hwfn->mcp_info->func_info;
2294 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2295 struct ecore_ptt *p_ptt, u32 personalities)
2297 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2298 struct public_func shmem_info;
2299 int i, count = 0, num_pfs;
2301 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2303 for (i = 0; i < num_pfs; i++) {
2304 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2305 MCP_PF_ID_BY_REL(p_hwfn, i));
2306 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2309 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2314 if ((1 << ((u32)protocol)) & personalities)
2321 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2322 struct ecore_ptt *p_ptt,
2328 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2329 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2334 if (IS_VF(p_hwfn->p_dev))
2337 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2338 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2339 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2340 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2342 *p_flash_size = flash_size;
2344 return ECORE_SUCCESS;
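/* Worked example for the calculation above: MCP_BYTES_PER_MBIT_OFFSET is 17
 * because 1 Mbit = 2^17 bytes, so the NVM_CFG4 field is effectively log2 of
 * the flash size in Mbit. A field value of 6, for instance, yields
 * 1 << (6 + 17) = 8 MB (64 Mbit) of flash.
 */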
2347 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2348 struct ecore_ptt *p_ptt)
2350 struct ecore_dev *p_dev = p_hwfn->p_dev;
2352 if (p_dev->recov_in_prog) {
2353 DP_NOTICE(p_hwfn, false,
2354 "Avoid triggering a recovery since such a process"
2355 " is already in progress\n");
2359 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2360 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2362 return ECORE_SUCCESS;
2365 static enum _ecore_status_t
2366 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2367 struct ecore_ptt *p_ptt,
2370 u32 resp = 0, param = 0, rc_param = 0;
2371 enum _ecore_status_t rc;
2373 /* Only the leader can configure MSIX, and it needs to take CMT into account */
2375 if (!IS_LEAD_HWFN(p_hwfn))
2376 return ECORE_SUCCESS;
2377 num *= p_hwfn->p_dev->num_hwfns;
2379 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2380 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2381 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2382 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2384 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2387 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2388 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
2392 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2393 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2400 static enum _ecore_status_t
2401 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2402 struct ecore_ptt *p_ptt,
2405 u32 resp = 0, param = num, rc_param = 0;
2406 enum _ecore_status_t rc;
2408 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2409 param, &resp, &rc_param);
2411 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2412 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2415 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2416 "Requested 0x%02x MSI-x interrupts for VFs\n",
2423 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2424 struct ecore_ptt *p_ptt,
2427 if (ECORE_IS_BB(p_hwfn->p_dev))
2428 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2430 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2433 enum _ecore_status_t
2434 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2435 struct ecore_mcp_drv_version *p_ver)
2437 struct ecore_mcp_mb_params mb_params;
2438 struct drv_version_stc drv_version;
2442 enum _ecore_status_t rc;
2445 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2446 return ECORE_SUCCESS;
2449 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2450 drv_version.version = p_ver->version;
2451 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2452 for (i = 0; i < num_words; i++) {
2453 /* The driver name is expected to be in a big-endian format */
2454 p_name = &p_ver->name[i * sizeof(u32)];
2455 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2456 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2459 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2460 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2461 mb_params.p_data_src = &drv_version;
2462 mb_params.data_src_size = sizeof(drv_version);
2463 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2464 if (rc != ECORE_SUCCESS)
2465 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
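/* Usage sketch for ecore_mcp_send_drv_version() (illustrative only; the
 * driver name and the version encoding below are hypothetical example
 * values, and the sketch assumes the name buffer is large enough for the
 * short literal being copied).
 */
static enum _ecore_status_t
ecore_example_send_drv_version(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt)
{
	static const char drv_name[] = "rte_qede";
	struct ecore_mcp_drv_version drv_ver;

	OSAL_MEM_ZERO(&drv_ver, sizeof(drv_ver));
	drv_ver.version = (8 << 24) | (10 << 16) | (9 << 8); /* e.g. 8.10.9.0 */
	OSAL_MEMCPY(drv_ver.name, drv_name, sizeof(drv_name));

	return ecore_mcp_send_drv_version(p_hwfn, p_ptt, &drv_ver);
}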
2470 /* A maximum of 100 msec waiting time for the MCP to halt */
2471 #define ECORE_MCP_HALT_SLEEP_MS 10
2472 #define ECORE_MCP_HALT_MAX_RETRIES 10
2474 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2475 struct ecore_ptt *p_ptt)
2477 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2478 enum _ecore_status_t rc;
2480 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2482 if (rc != ECORE_SUCCESS) {
2483 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2488 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2489 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2490 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2492 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2494 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2495 DP_NOTICE(p_hwfn, false,
2496 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2497 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2501 ecore_mcp_cmd_set_blocking(p_hwfn, true);
2503 return ECORE_SUCCESS;
2506 #define ECORE_MCP_RESUME_SLEEP_MS 10
2508 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2509 struct ecore_ptt *p_ptt)
2511 u32 cpu_mode, cpu_state;
2513 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2515 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2516 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2517 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2519 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2520 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2522 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2523 DP_NOTICE(p_hwfn, false,
2524 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2525 cpu_mode, cpu_state);
2529 ecore_mcp_cmd_set_blocking(p_hwfn, false);
2531 return ECORE_SUCCESS;
2534 enum _ecore_status_t
2535 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2536 struct ecore_ptt *p_ptt,
2537 enum ecore_ov_client client)
2539 enum _ecore_status_t rc;
2540 u32 resp = 0, param = 0;
2544 case ECORE_OV_CLIENT_DRV:
2545 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2547 case ECORE_OV_CLIENT_USER:
2548 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2550 case ECORE_OV_CLIENT_VENDOR_SPEC:
2551 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2554 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2558 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2559 drv_mb_param, &resp, &param);
2560 if (rc != ECORE_SUCCESS)
2561 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2566 enum _ecore_status_t
2567 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2568 struct ecore_ptt *p_ptt,
2569 enum ecore_ov_driver_state drv_state)
2571 enum _ecore_status_t rc;
2572 u32 resp = 0, param = 0;
2575 switch (drv_state) {
2576 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2577 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2579 case ECORE_OV_DRIVER_STATE_DISABLED:
2580 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2582 case ECORE_OV_DRIVER_STATE_ACTIVE:
2583 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2586 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2590 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2591 drv_mb_param, &resp, &param);
2592 if (rc != ECORE_SUCCESS)
2593 DP_ERR(p_hwfn, "Failed to send driver state\n");
2598 enum _ecore_status_t
2599 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2600 struct ecore_fc_npiv_tbl *p_table)
2605 enum _ecore_status_t
2606 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
2607 struct ecore_ptt *p_ptt, u16 mtu)
2612 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2613 struct ecore_ptt *p_ptt,
2614 enum ecore_led_mode mode)
2616 u32 resp = 0, param = 0, drv_mb_param;
2617 enum _ecore_status_t rc;
2620 case ECORE_LED_MODE_ON:
2621 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2623 case ECORE_LED_MODE_OFF:
2624 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2626 case ECORE_LED_MODE_RESTORE:
2627 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2630 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2634 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2635 drv_mb_param, &resp, &param);
2636 if (rc != ECORE_SUCCESS)
2637 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2642 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2643 struct ecore_ptt *p_ptt,
2646 u32 resp = 0, param = 0;
2647 enum _ecore_status_t rc;
2649 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2650 mask_parities, &resp, &param);
2652 if (rc != ECORE_SUCCESS) {
2654 "MCP response failure for mask parities, aborting\n");
2655 } else if (resp != FW_MSG_CODE_OK) {
2657 "MCP did not ack mask parity request. Old MFW?\n");
2664 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2667 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2668 u32 bytes_left, offset, bytes_to_copy, buf_size;
2669 u32 nvm_offset, resp, param;
2670 struct ecore_ptt *p_ptt;
2671 enum _ecore_status_t rc = ECORE_SUCCESS;
2673 p_ptt = ecore_ptt_acquire(p_hwfn);
2679 while (bytes_left > 0) {
2680 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2681 MCP_DRV_NVM_BUF_LEN);
2682 nvm_offset = (addr + offset) | (bytes_to_copy <<
2683 DRV_MB_PARAM_NVM_LEN_OFFSET);
2684 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2685 DRV_MSG_CODE_NVM_READ_NVRAM,
2686 nvm_offset, &resp, &param, &buf_size,
2687 (u32 *)(p_buf + offset));
2688 if (rc != ECORE_SUCCESS) {
2689 DP_NOTICE(p_dev, false,
2690 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
2692 resp = FW_MSG_CODE_ERROR;
2696 if (resp != FW_MSG_CODE_NVM_OK) {
2697 DP_NOTICE(p_dev, false,
2698 "nvm read failed, resp = 0x%08x\n", resp);
2699 rc = ECORE_UNKNOWN_ERROR;
2703 /* This can be a lengthy process, and it's possible the scheduler
2704 * isn't preemptible. Sleep a bit to prevent CPU hogging. */
2706 if (bytes_left % 0x1000 <
2707 (bytes_left - buf_size) % 0x1000)
2711 bytes_left -= buf_size;
2714 p_dev->mcp_nvm_resp = resp;
2715 ecore_ptt_release(p_hwfn, p_ptt);
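/* Worked example for the sleep heuristic above: with bytes_left = 0x1020 and
 * buf_size = 0x80, bytes_left % 0x1000 = 0x20 while
 * (bytes_left - buf_size) % 0x1000 = 0xfa0, so the condition holds and the
 * loop yields the CPU - i.e. it sleeps roughly once for every 4 KB read.
 */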
2720 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2721 u32 addr, u8 *p_buf, u32 len)
2723 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2724 struct ecore_ptt *p_ptt;
2726 enum _ecore_status_t rc;
2728 p_ptt = ecore_ptt_acquire(p_hwfn);
2732 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2733 (cmd == ECORE_PHY_CORE_READ) ?
2734 DRV_MSG_CODE_PHY_CORE_READ :
2735 DRV_MSG_CODE_PHY_RAW_READ,
2736 addr, &resp, &param, &len, (u32 *)p_buf);
2737 if (rc != ECORE_SUCCESS)
2738 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2740 p_dev->mcp_nvm_resp = resp;
2741 ecore_ptt_release(p_hwfn, p_ptt);
2746 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2748 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2749 struct ecore_ptt *p_ptt;
2751 p_ptt = ecore_ptt_acquire(p_hwfn);
2755 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2756 ecore_ptt_release(p_hwfn, p_ptt);
2758 return ECORE_SUCCESS;
2761 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2763 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2764 struct ecore_ptt *p_ptt;
2766 enum _ecore_status_t rc;
2768 p_ptt = ecore_ptt_acquire(p_hwfn);
2771 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
2773 p_dev->mcp_nvm_resp = resp;
2774 ecore_ptt_release(p_hwfn, p_ptt);
2779 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2782 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2783 struct ecore_ptt *p_ptt;
2785 enum _ecore_status_t rc;
2787 p_ptt = ecore_ptt_acquire(p_hwfn);
2790 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
2792 p_dev->mcp_nvm_resp = resp;
2793 ecore_ptt_release(p_hwfn, p_ptt);
2798 /* rc is initialized to ECORE_INVAL because the while loop below
2799 * might not be entered at all if len is 0 */
2801 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2802 u32 addr, u8 *p_buf, u32 len)
2804 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
2805 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2806 enum _ecore_status_t rc = ECORE_INVAL;
2807 struct ecore_ptt *p_ptt;
2809 p_ptt = ecore_ptt_acquire(p_hwfn);
2814 case ECORE_PUT_FILE_DATA:
2815 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2817 case ECORE_NVM_WRITE_NVRAM:
2818 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2820 case ECORE_EXT_PHY_FW_UPGRADE:
2821 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
2824 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
2831 while (buf_idx < len) {
2832 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2833 MCP_DRV_NVM_BUF_LEN);
2834 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
2837 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
2838 &resp, &param, buf_size,
2839 (u32 *)&p_buf[buf_idx]);
2840 if (rc != ECORE_SUCCESS) {
2841 DP_NOTICE(p_dev, false,
2842 "ecore_mcp_nvm_write() failed, rc = %d\n",
2844 resp = FW_MSG_CODE_ERROR;
2848 if (resp != FW_MSG_CODE_OK &&
2849 resp != FW_MSG_CODE_NVM_OK &&
2850 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
2851 DP_NOTICE(p_dev, false,
2852 "nvm write failed, resp = 0x%08x\n", resp);
2853 rc = ECORE_UNKNOWN_ERROR;
2857 /* This can be a lengthy process, and it's possible the scheduler
2858 * isn't preemptible. Sleep a bit to prevent CPU hogging. */
2860 if (buf_idx % 0x1000 >
2861 (buf_idx + buf_size) % 0x1000)
2864 buf_idx += buf_size;
2867 p_dev->mcp_nvm_resp = resp;
2869 ecore_ptt_release(p_hwfn, p_ptt);
2874 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2875 u32 addr, u8 *p_buf, u32 len)
2877 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2878 struct ecore_ptt *p_ptt;
2879 u32 resp, param, nvm_cmd;
2880 enum _ecore_status_t rc;
2882 p_ptt = ecore_ptt_acquire(p_hwfn);
2886 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
2887 DRV_MSG_CODE_PHY_RAW_WRITE;
2888 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
2889 &resp, &param, len, (u32 *)p_buf);
2890 if (rc != ECORE_SUCCESS)
2891 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2892 p_dev->mcp_nvm_resp = resp;
2893 ecore_ptt_release(p_hwfn, p_ptt);
2898 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2901 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2902 struct ecore_ptt *p_ptt;
2904 enum _ecore_status_t rc;
2906 p_ptt = ecore_ptt_acquire(p_hwfn);
2910 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
2912 p_dev->mcp_nvm_resp = resp;
2913 ecore_ptt_release(p_hwfn, p_ptt);
2918 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2919 struct ecore_ptt *p_ptt,
2920 u32 port, u32 addr, u32 offset,
2923 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
2925 enum _ecore_status_t rc;
2927 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2928 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
2932 while (bytes_left > 0) {
2933 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2934 MAX_I2C_TRANSACTION_SIZE);
2935 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2936 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2937 nvm_offset |= ((addr + offset) <<
2938 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
2939 nvm_offset |= (bytes_to_copy <<
2940 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
2941 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2942 DRV_MSG_CODE_TRANSCEIVER_READ,
2943 nvm_offset, &resp, &param, &buf_size,
2944 (u32 *)(p_buf + offset));
2945 if ((resp & FW_MSG_CODE_MASK) ==
2946 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2948 } else if ((resp & FW_MSG_CODE_MASK) !=
2949 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2950 return ECORE_UNKNOWN_ERROR;
2953 bytes_left -= buf_size;
2956 return ECORE_SUCCESS;
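/* Usage sketch for ecore_mcp_phy_sfp_read() (illustrative only; port 0,
 * I2C address 0xA0 and a 32-byte read of the transceiver's ID page are
 * just example values).
 */
static enum _ecore_status_t
ecore_example_sfp_id_dump(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  u8 *p_buf32)
{
	return ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 0 /* port */,
				      0xA0 /* I2C address */, 0 /* offset */,
				      32 /* len */, p_buf32);
}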
2959 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2960 struct ecore_ptt *p_ptt,
2961 u32 port, u32 addr, u32 offset,
2964 u32 buf_idx, buf_size, nvm_offset, resp, param;
2965 enum _ecore_status_t rc;
2967 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2968 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
2970 while (buf_idx < len) {
2971 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2972 MAX_I2C_TRANSACTION_SIZE);
2973 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2974 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2975 nvm_offset |= ((offset + buf_idx) <<
2976 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
2977 nvm_offset |= (buf_size <<
2978 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
2979 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
2980 DRV_MSG_CODE_TRANSCEIVER_WRITE,
2981 nvm_offset, &resp, &param, buf_size,
2982 (u32 *)&p_buf[buf_idx]);
2983 if ((resp & FW_MSG_CODE_MASK) ==
2984 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2986 } else if ((resp & FW_MSG_CODE_MASK) !=
2987 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2988 return ECORE_UNKNOWN_ERROR;
2990 buf_idx += buf_size;
2993 return ECORE_SUCCESS;
2996 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2997 struct ecore_ptt *p_ptt,
2998 u16 gpio, u32 *gpio_val)
3000 enum _ecore_status_t rc = ECORE_SUCCESS;
3001 u32 drv_mb_param = 0, rsp;
3003 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3005 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3006 drv_mb_param, &rsp, gpio_val);
3008 if (rc != ECORE_SUCCESS)
3011 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3012 return ECORE_UNKNOWN_ERROR;
3014 return ECORE_SUCCESS;
3017 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3018 struct ecore_ptt *p_ptt,
3019 u16 gpio, u16 gpio_val)
3021 enum _ecore_status_t rc = ECORE_SUCCESS;
3022 u32 drv_mb_param = 0, param, rsp;
3024 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3025 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3027 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3028 drv_mb_param, &rsp, &param);
3030 if (rc != ECORE_SUCCESS)
3033 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3034 return ECORE_UNKNOWN_ERROR;
3036 return ECORE_SUCCESS;
3039 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3040 struct ecore_ptt *p_ptt,
3041 u16 gpio, u32 *gpio_direction,
3044 u32 drv_mb_param = 0, rsp, val = 0;
3045 enum _ecore_status_t rc = ECORE_SUCCESS;
3047 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3049 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3050 drv_mb_param, &rsp, &val);
3051 if (rc != ECORE_SUCCESS)
3054 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3055 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3056 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3057 DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3059 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3060 return ECORE_UNKNOWN_ERROR;
3062 return ECORE_SUCCESS;
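/* Usage sketch for the GPIO helpers above (illustrative only; the GPIO
 * number is whatever the board wiring dictates).
 */
static enum _ecore_status_t
ecore_example_gpio_dump(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u16 gpio)
{
	enum _ecore_status_t rc;
	u32 gpio_val = 0;

	rc = ecore_mcp_gpio_read(p_hwfn, p_ptt, gpio, &gpio_val);
	if (rc != ECORE_SUCCESS)
		return rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "GPIO %d = 0x%08x\n", gpio, gpio_val);

	return ECORE_SUCCESS;
}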
3065 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3066 struct ecore_ptt *p_ptt)
3068 u32 drv_mb_param = 0, rsp, param;
3069 enum _ecore_status_t rc = ECORE_SUCCESS;
3071 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3072 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3074 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3075 drv_mb_param, &rsp, &param);
3077 if (rc != ECORE_SUCCESS)
3080 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3081 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3082 rc = ECORE_UNKNOWN_ERROR;
3087 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3088 struct ecore_ptt *p_ptt)
3090 u32 drv_mb_param, rsp, param;
3091 enum _ecore_status_t rc = ECORE_SUCCESS;
3093 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3094 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3096 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3097 drv_mb_param, &rsp, &param);
3099 if (rc != ECORE_SUCCESS)
3102 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3103 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3104 rc = ECORE_UNKNOWN_ERROR;
3109 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3110 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3112 u32 drv_mb_param = 0, rsp;
3113 enum _ecore_status_t rc = ECORE_SUCCESS;
3115 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3116 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3118 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3119 drv_mb_param, &rsp, num_images);
3121 if (rc != ECORE_SUCCESS)
3124 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3125 rc = ECORE_UNKNOWN_ERROR;
3130 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3131 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3132 struct bist_nvm_image_att *p_image_att, u32 image_index)
3134 u32 buf_size, nvm_offset, resp, param;
3135 enum _ecore_status_t rc;
3137 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3138 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3139 nvm_offset |= (image_index <<
3140 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3141 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3142 nvm_offset, &resp, &param, &buf_size,
3143 (u32 *)p_image_att);
3144 if (rc != ECORE_SUCCESS)
3147 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3148 (p_image_att->return_code != 1))
3149 rc = ECORE_UNKNOWN_ERROR;
3154 enum _ecore_status_t
3155 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3156 struct ecore_ptt *p_ptt,
3157 struct ecore_temperature_info *p_temp_info)
3159 struct ecore_temperature_sensor *p_temp_sensor;
3160 struct temperature_status_stc mfw_temp_info;
3161 struct ecore_mcp_mb_params mb_params;
3163 enum _ecore_status_t rc;
3166 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3167 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3168 mb_params.p_data_dst = &mfw_temp_info;
3169 mb_params.data_dst_size = sizeof(mfw_temp_info);
3170 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3171 if (rc != ECORE_SUCCESS)
3174 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3175 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3176 ECORE_MAX_NUM_OF_SENSORS);
3177 for (i = 0; i < p_temp_info->num_sensors; i++) {
3178 val = mfw_temp_info.sensor[i];
3179 p_temp_sensor = &p_temp_info->sensors[i];
3180 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3181 SENSOR_LOCATION_OFFSET;
3182 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3183 THRESHOLD_HIGH_OFFSET;
3184 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3185 CRITICAL_TEMPERATURE_OFFSET;
3186 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3187 CURRENT_TEMP_OFFSET;
3190 return ECORE_SUCCESS;
3193 enum _ecore_status_t ecore_mcp_get_mba_versions(
3194 struct ecore_hwfn *p_hwfn,
3195 struct ecore_ptt *p_ptt,
3196 struct ecore_mba_vers *p_mba_vers)
3198 u32 buf_size, resp, param;
3199 enum _ecore_status_t rc;
3201 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3202 0, &resp, &param, &buf_size,
3203 &p_mba_vers->mba_vers[0]);
3205 if (rc != ECORE_SUCCESS)
3208 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3209 rc = ECORE_UNKNOWN_ERROR;
3211 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3212 rc = ECORE_UNKNOWN_ERROR;
3217 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3218 struct ecore_ptt *p_ptt,
3223 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3224 0, &rsp, (u32 *)num_events);
3227 static enum resource_id_enum
3228 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3230 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3234 mfw_res_id = RESOURCE_NUM_SB_E;
3236 case ECORE_L2_QUEUE:
3237 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3240 mfw_res_id = RESOURCE_NUM_VPORT_E;
3243 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3246 mfw_res_id = RESOURCE_NUM_PQ_E;
3249 mfw_res_id = RESOURCE_NUM_RL_E;
3253 /* Each VFC resource can accommodate both a MAC and a VLAN */
3254 mfw_res_id = RESOURCE_VFC_FILTER_E;
3257 mfw_res_id = RESOURCE_ILT_E;
3259 case ECORE_LL2_QUEUE:
3260 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3262 case ECORE_RDMA_CNQ_RAM:
3263 case ECORE_CMDQS_CQS:
3264 /* CNQ/CMDQS are the same resource */
3265 mfw_res_id = RESOURCE_CQS_E;
3267 case ECORE_RDMA_STATS_QUEUE:
3268 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3271 mfw_res_id = RESOURCE_BDQ_E;
3280 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3281 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3282 #define ECORE_RESC_ALLOC_VERSION \
3283 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3284 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
3285 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3286 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3288 struct ecore_resc_alloc_in_params {
3290 enum ecore_resources res_id;
3294 struct ecore_resc_alloc_out_params {
3304 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
3306 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3308 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3309 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3310 enum _ecore_status_t rc;
3312 /* Allow ongoing PCIe transactions to complete */
3313 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3315 /* Clear the PF's internal FID_enable in the PXP */
3316 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3317 if (rc != ECORE_SUCCESS)
3318 DP_NOTICE(p_hwfn, false,
3319 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
3325 static enum _ecore_status_t
3326 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3327 struct ecore_ptt *p_ptt,
3328 struct ecore_resc_alloc_in_params *p_in_params,
3329 struct ecore_resc_alloc_out_params *p_out_params)
3331 struct ecore_mcp_mb_params mb_params;
3332 struct resource_info mfw_resc_info;
3333 enum _ecore_status_t rc;
3335 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3337 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3338 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3340 "Failed to match resource %d [%s] with the MFW resources\n",
3341 p_in_params->res_id,
3342 ecore_hw_get_resc_name(p_in_params->res_id));
3346 switch (p_in_params->cmd) {
3347 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3348 mfw_resc_info.size = p_in_params->resc_max_val;
3350 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3353 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3358 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3359 mb_params.cmd = p_in_params->cmd;
3360 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3361 mb_params.p_data_src = &mfw_resc_info;
3362 mb_params.data_src_size = sizeof(mfw_resc_info);
3363 mb_params.p_data_dst = mb_params.p_data_src;
3364 mb_params.data_dst_size = mb_params.data_src_size;
3366 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3367 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3368 p_in_params->cmd, p_in_params->res_id,
3369 ecore_hw_get_resc_name(p_in_params->res_id),
3370 GET_MFW_FIELD(mb_params.param,
3371 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3372 GET_MFW_FIELD(mb_params.param,
3373 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3374 p_in_params->resc_max_val);
3376 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3377 if (rc != ECORE_SUCCESS)
3380 p_out_params->mcp_resp = mb_params.mcp_resp;
3381 p_out_params->mcp_param = mb_params.mcp_param;
3382 p_out_params->resc_num = mfw_resc_info.size;
3383 p_out_params->resc_start = mfw_resc_info.offset;
3384 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3385 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3386 p_out_params->flags = mfw_resc_info.flags;
3388 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3389 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3390 GET_MFW_FIELD(p_out_params->mcp_param,
3391 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3392 GET_MFW_FIELD(p_out_params->mcp_param,
3393 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3394 p_out_params->resc_num, p_out_params->resc_start,
3395 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3396 p_out_params->flags);
3398 return ECORE_SUCCESS;
3401 enum _ecore_status_t
3402 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3403 enum ecore_resources res_id, u32 resc_max_val,
3406 struct ecore_resc_alloc_out_params out_params;
3407 struct ecore_resc_alloc_in_params in_params;
3408 enum _ecore_status_t rc;
3410 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3411 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3412 in_params.res_id = res_id;
3413 in_params.resc_max_val = resc_max_val;
3414 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3415 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3417 if (rc != ECORE_SUCCESS)
3420 *p_mcp_resp = out_params.mcp_resp;
3422 return ECORE_SUCCESS;
3425 enum _ecore_status_t
3426 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3427 enum ecore_resources res_id, u32 *p_mcp_resp,
3428 u32 *p_resc_num, u32 *p_resc_start)
3430 struct ecore_resc_alloc_out_params out_params;
3431 struct ecore_resc_alloc_in_params in_params;
3432 enum _ecore_status_t rc;
3434 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3435 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3436 in_params.res_id = res_id;
3437 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3438 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3440 if (rc != ECORE_SUCCESS)
3443 *p_mcp_resp = out_params.mcp_resp;
3445 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3446 *p_resc_num = out_params.resc_num;
3447 *p_resc_start = out_params.resc_start;
3450 return ECORE_SUCCESS;
3453 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3454 struct ecore_ptt *p_ptt)
3456 u32 mcp_resp, mcp_param;
3458 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3459 &mcp_resp, &mcp_param);
3462 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3463 struct ecore_ptt *p_ptt,
3464 u32 param, u32 *p_mcp_resp,
3467 enum _ecore_status_t rc;
3469 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3470 p_mcp_resp, p_mcp_param);
3471 if (rc != ECORE_SUCCESS)
3474 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3476 "The resource command is unsupported by the MFW\n");
3477 return ECORE_NOTIMPL;
3480 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3481 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3483 DP_NOTICE(p_hwfn, false,
3484 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3492 enum _ecore_status_t
3493 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3494 struct ecore_resc_lock_params *p_params)
3496 u32 param = 0, mcp_resp, mcp_param;
3498 enum _ecore_status_t rc;
3500 switch (p_params->timeout) {
3501 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3502 opcode = RESOURCE_OPCODE_REQ;
3503 p_params->timeout = 0;
3505 case ECORE_MCP_RESC_LOCK_TO_NONE:
3506 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3507 p_params->timeout = 0;
3510 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3514 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3515 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3516 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3518 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3519 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3520 param, p_params->timeout, opcode, p_params->resource);
3522 /* Attempt to acquire the resource */
3523 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3525 if (rc != ECORE_SUCCESS)
3528 /* Analyze the response */
3529 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3530 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3532 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3533 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3534 mcp_param, opcode, p_params->owner);
3537 case RESOURCE_OPCODE_GNT:
3538 p_params->b_granted = true;
3540 case RESOURCE_OPCODE_BUSY:
3541 p_params->b_granted = false;
3544 DP_NOTICE(p_hwfn, false,
3545 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3550 return ECORE_SUCCESS;
3553 enum _ecore_status_t
3554 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3555 struct ecore_resc_lock_params *p_params)
3558 enum _ecore_status_t rc;
3561 /* No need for an interval before the first iteration */
3563 if (p_params->sleep_b4_retry) {
3564 u16 retry_interval_in_ms =
3565 DIV_ROUND_UP(p_params->retry_interval,
3568 OSAL_MSLEEP(retry_interval_in_ms);
3570 OSAL_UDELAY(p_params->retry_interval);
3574 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3575 if (rc != ECORE_SUCCESS)
3578 if (p_params->b_granted)
3580 } while (retry_cnt++ < p_params->retry_num);
3582 return ECORE_SUCCESS;
3585 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
3586 struct ecore_resc_unlock_params *p_unlock,
3587 enum ecore_resc_lock resource,
3588 bool b_is_permanent)
3590 if (p_lock != OSAL_NULL) {
3591 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
3593 /* Permanent resources don't require aging, and there's no
3594 * point in trying to acquire them more than once since it's
3595 * unexpected another entity would release them.
3597 if (b_is_permanent) {
3598 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
3600 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3601 p_lock->retry_interval =
3602 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3603 p_lock->sleep_b4_retry = true;
3606 p_lock->resource = resource;
3609 if (p_unlock != OSAL_NULL) {
3610 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
3611 p_unlock->resource = resource;
3615 enum _ecore_status_t
3616 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3617 struct ecore_resc_unlock_params *p_params)
3619 u32 param = 0, mcp_resp, mcp_param;
3621 enum _ecore_status_t rc;
3623 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3624 : RESOURCE_OPCODE_RELEASE;
3625 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3626 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3628 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3629 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3630 param, opcode, p_params->resource);
3632 /* Attempt to release the resource */
3633 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3635 if (rc != ECORE_SUCCESS)
3638 /* Analyze the response */
3639 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3641 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3642 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3646 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3648 "Resource unlock request for an already released resource [%d]\n",
3649 p_params->resource);
3651 case RESOURCE_OPCODE_RELEASED:
3652 p_params->b_released = true;
3654 case RESOURCE_OPCODE_WRONG_OWNER:
3655 p_params->b_released = false;
3658 DP_NOTICE(p_hwfn, false,
3659 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3664 return ECORE_SUCCESS;
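/* Usage sketch for the resource lock helpers above (illustrative only;
 * ECORE_RESC_LOCK_DBG_DUMP is assumed to be a member of enum ecore_resc_lock
 * and stands in for whichever lock the caller actually needs).
 */
static enum _ecore_status_t
ecore_example_with_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_resc_unlock_params unlock_params;
	struct ecore_resc_lock_params lock_params;
	enum _ecore_status_t rc;

	ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
					 ECORE_RESC_LOCK_DBG_DUMP, false);

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc != ECORE_SUCCESS)
		return rc;
	if (!lock_params.b_granted)
		return ECORE_BUSY;

	/* ... access the MFW-arbitrated resource here ... */

	return ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}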
3667 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
3669 return !!(p_hwfn->mcp_info->capabilities &
3670 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3673 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
3674 struct ecore_ptt *p_ptt)
3677 enum _ecore_status_t rc;
3679 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3680 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3681 if (rc == ECORE_SUCCESS)
3682 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
3683 "MFW supported features: %08x\n",
3684 p_hwfn->mcp_info->capabilities);
3689 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
3690 struct ecore_ptt *p_ptt)
3692 u32 mcp_resp, mcp_param, features;
3694 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
3695 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3696 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
3698 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3699 features, &mcp_resp, &mcp_param);
3702 enum _ecore_status_t
3703 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3704 struct ecore_mcp_drv_attr *p_drv_attr)
3706 struct attribute_cmd_write_stc attr_cmd_write;
3707 enum _attribute_commands_e mfw_attr_cmd;
3708 struct ecore_mcp_mb_params mb_params;
3709 enum _ecore_status_t rc;
3711 switch (p_drv_attr->attr_cmd) {
3712 case ECORE_MCP_DRV_ATTR_CMD_READ:
3713 mfw_attr_cmd = ATTRIBUTE_CMD_READ;
3715 case ECORE_MCP_DRV_ATTR_CMD_WRITE:
3716 mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
3718 case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
3719 mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
3721 case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
3722 mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
3725 DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
3726 p_drv_attr->attr_cmd);
3730 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3731 mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
3732 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
3733 p_drv_attr->attr_num);
3734 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
3736 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
3737 OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
3738 attr_cmd_write.val = p_drv_attr->val;
3739 attr_cmd_write.mask = p_drv_attr->mask;
3740 attr_cmd_write.offset = p_drv_attr->offset;
3742 mb_params.p_data_src = &attr_cmd_write;
3743 mb_params.data_src_size = sizeof(attr_cmd_write);
3746 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3747 if (rc != ECORE_SUCCESS)
3750 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3752 "The attribute command is not supported by the MFW\n");
3753 return ECORE_NOTIMPL;
3754 } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3756 "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
3757 mb_params.mcp_resp, p_drv_attr->attr_cmd,
3758 p_drv_attr->attr_num);
3762 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3763 "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
3764 p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
3765 p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
3766 mb_params.mcp_param);
3768 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
3769 p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
3770 p_drv_attr->val = mb_params.mcp_param;
3772 return ECORE_SUCCESS;
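/* Usage sketch for ecore_mcp_drv_attribute() (illustrative only; attribute
 * number 0 is a placeholder index).
 */
static enum _ecore_status_t
ecore_example_read_attr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u32 *p_val)
{
	struct ecore_mcp_drv_attr drv_attr;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
	drv_attr.attr_num = 0;

	rc = ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr);
	if (rc == ECORE_SUCCESS)
		*p_val = drv_attr.val;

	return rc;
}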
3775 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3776 u32 offset, u32 val)
3778 struct ecore_mcp_mb_params mb_params = {0};
3779 enum _ecore_status_t rc = ECORE_SUCCESS;
3782 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
3783 mb_params.param = offset;
3784 mb_params.p_data_src = &dword;
3785 mb_params.data_src_size = sizeof(dword);
3787 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3788 if (rc != ECORE_SUCCESS) {
3789 DP_NOTICE(p_hwfn, false,
3790 "Failed to wol write request, rc = %d\n", rc);
3793 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
3794 DP_NOTICE(p_hwfn, false,
3795 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
3796 val, offset, mb_params.mcp_resp);
3797 rc = ECORE_UNKNOWN_ERROR;