/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define CHIP_MCP_RESP_ITER_US	10
#define EMUL_MCP_RESP_ITER_US	(1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES		(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))
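
/* Added commentary (not in the original sources): DRV_MB_WR/DRV_MB_RD are
 * thin wrappers around GRC accesses to this PF's driver mailbox. For
 * example, a hypothetical
 *
 *	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, 0);
 *
 * expands to roughly
 *
 *	ecore_wr(p_hwfn, p_ptt,
 *		 p_hwfn->mcp_info->drv_mb_addr +
 *		 OFFSETOF(struct public_drv_mb, drv_mb_param), 0);
 */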
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET	17

static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			OSAL_BE32_TO_CPU(tmp);
	}
}
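
/* Added commentary: the MFW mailbox starts with a length dword (read into
 * mfw_mb_length at init time), which is why each message dword above is read
 * at offset (i << 2) + sizeof(u32). The MFW writes the data big-endian,
 * hence the OSAL_BE32_TO_CPU() conversion into the mfw_mb_cur snapshot.
 */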
struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
	if (!p_cmd_elem) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
		goto out;
	}

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
				   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return OSAL_NULL;
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
		return ECORE_NOMEM;
	}
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
		OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
		goto err;

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}
static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}
/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
						   list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return ECORE_AGAIN;

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
}
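
/* Added commentary: completion matching is done purely by sequence number.
 * The driver tags each command with drv_mb_seq in the low bits of
 * drv_mb_header, and the MFW echoes that number in the low bits of
 * fw_mb_header. A sketch of the check performed above:
 *
 *	u16 seen = (u16)(DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header) &
 *			 FW_MSG_SEQ_NUMBER_MASK);
 *	if (seen == p_hwfn->mcp_info->drv_mb_seq)
 *		handle_response();	<- hypothetical; the response belongs
 *					   to the last command sent
 */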
/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
				      u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 delay)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_UDELAY(delay);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_AGAIN;
	}

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = ECORE_NOMEM;
		goto err;
	}

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		OSAL_UDELAY(delay);
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
		return ECORE_AGAIN;
	}

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

err:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}
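
/* Added commentary: _ecore_mcp_cmd_and_union() polls twice - once for the
 * mailbox to become free and once for the response - releasing cmd_lock
 * between iterations so other contexts can make progress. If the MFW never
 * answers, the command element is removed, further mailbox commands are
 * blocked via ecore_mcp_cmd_set_blocking(), and the failure is escalated
 * through ecore_hw_err_notify(ECORE_HW_ERR_MFW_RESP_FAIL).
 */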
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       union_data_size);
		return ECORE_INVAL;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;
	}

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
					delay);
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
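
/* Illustrative usage (hypothetical caller, not part of this file):
 *
 *	u32 resp = 0, param = 0;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
 *			   &resp, &param);
 *
 * On success, resp holds the FW_MSG_CODE_* response code and param holds the
 * response parameter.
 */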
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

	return ECORE_SUCCESS;
}
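
/* Added commentary: a single NVM read transaction is bounded by
 * MCP_DRV_NVM_BUF_LEN bytes, so callers reading a larger image issue the
 * command repeatedly while advancing the offset encoded in 'param'. A hedged
 * sketch (hypothetical caller, 'addr_and_len' and 'buf' are assumptions):
 *
 *	u32 resp, param, read_len;
 *
 *	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
 *				  addr_and_len, &resp, &param, &read_len,
 *				  (u32 *)buf);
 */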
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}

static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}
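
/* Added note: the bitmap is informational and is reported to the MFW in
 * drv_ver_1 of the load request. For example, a build with only
 * CONFIG_ECORE_L2 and CONFIG_ECORE_SRIOV defined would report
 * (CONFIG_ECORE_L2_BITMAP_IDX | CONFIG_ECORE_SRIOV_BITMAP_IDX) == 0x3.
 */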
struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}
static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
				   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	}
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
				    u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
						&mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}
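
/* Added commentary: on success, p_params->load_code tells the caller how
 * much initialization it owns - FW_MSG_CODE_DRV_LOAD_ENGINE (first PF on
 * the engine), _PORT (first PF on the port) or _FUNCTION (PF-only init).
 * A hedged sketch of a caller's dispatch (helpers are hypothetical):
 *
 *	switch (p_params->load_code) {
 *	case FW_MSG_CODE_DRV_LOAD_ENGINE:
 *		engine_init();		falls through to port init
 *	case FW_MSG_CODE_DRV_LOAD_PORT:
 *		port_init();		falls through to function init
 *	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
 *		function_init();
 *	}
 */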
enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn, false,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
				     &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}
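
/* Added note: both disabled_vfs[] above and vfs_to_ack[] are bitmaps of
 * VF_MAX_STATIC bits packed into dwords; assuming the usual layout, VF n
 * maps to bit (n % 32) of dword (n / 32), e.g. VF 37 is bit 5 of element 1.
 */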
static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}
static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}
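
/* Added commentary: the copy size is clamped to the section size advertised
 * by the MFW, so a driver built against a newer (larger) struct public_func
 * than the running MFW provides only reads the dwords that actually exist;
 * the remainder stays zeroed by the OSAL_MEM_ZERO() above.
 */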
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
						 MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store total line speed as p_link->speed is
	 * again changed according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by ecore, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
					EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return ECORE_SUCCESS;
}
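
/* Illustrative usage (hypothetical caller): link control is driven off the
 * parameters cached in mcp_info->link_input -
 *
 *	p_hwfn->mcp_info->link_input.speed.autoneg = true;
 *	rc = ecore_mcp_set_link(p_hwfn, p_ptt, true);	issues INIT_PHY
 *	...
 *	rc = ecore_mcp_set_link(p_hwfn, p_ptt, false);	issues LINK_RESET
 */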
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}
static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}
static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is
	 * always a limit and correct the value to min `1' and max `100'
	 * if the limit isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}
static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
				 p_hwfn->hw_info.ovlan);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
				 p_hwfn->hw_info.ovlan);
		} else {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);

			/* Configure DB to add external vlan to EDPM packets */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
		}

		ecore_sp_pf_update_stag(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		      &resp, &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
struct ecore_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = ECORE_NOTIMPL;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = ECORE_NOTIMPL;
	}

	return rc;
}

static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 epoch)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
	mdump_cmd_params.p_data_src = &epoch;
	mdump_cmd_params.data_src_size = sizeof(epoch);

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
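
/* Illustrative usage (hypothetical caller): arming and firing a crash dump
 * through the mdump sub-commands -
 *
 *	ecore_mcp_mdump_set_values(p_hwfn, p_ptt, epoch);
 *	ecore_mcp_mdump_trigger(p_hwfn, p_ptt);
 *
 * where 'epoch' is a caller-supplied timestamp stored with the dump.
 */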
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct mdump_config_stc *p_mdump_config)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
	mdump_cmd_params.p_data_dst = p_mdump_config;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}

enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mdump_info *p_mdump_info)
{
	u32 addr, global_offsize, global_addr;
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
					global_addr +
					OFFSETOF(struct public_global,
						 mdump_reason));

	if (p_mdump_info->reason) {
		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_mdump_info->version = mdump_config.version;
		p_mdump_info->config = mdump_config.config;
		p_mdump_info->epoch = mdump_config.epoc;
		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
		p_mdump_info->valid_logs = mdump_config.valid_logs;

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
			   p_mdump_info->reason, p_mdump_info->version,
			   p_mdump_info->config, p_mdump_info->epoch,
			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t
ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mdump_retain_data *p_mdump_retain)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	struct mdump_retain_data_stc mfw_mdump_retain;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
	mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
	mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mdump_retain->valid = mfw_mdump_retain.valid;
	p_mdump_retain->epoch = mfw_mdump_retain.epoch;
	p_mdump_retain->pf = mfw_mdump_retain.pf;
	p_mdump_retain->status = mfw_mdump_retain.status;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_retain_data mdump_retain;
	enum _ecore_status_t rc;

	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
	if (rc == ECORE_SUCCESS && mdump_retain.valid) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
			  mdump_retain.epoch, mdump_retain.pf,
			  mdump_retain.status);
	} else {
		DP_NOTICE(p_hwfn, false,
			  "The MFW notified that a critical error occurred in the device\n");
	}

	if (p_hwfn->p_dev->allow_mdump) {
		DP_NOTICE(p_hwfn, false,
			  "Not acknowledging the notification to allow the MFW crash dump\n");
		return;
	}

	DP_NOTICE(p_hwfn, false,
		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
1952 void
1953 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1954 {
1955 struct public_func shmem_info;
1956 u32 port_cfg, val;
1958 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
1959 return;
1961 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1962 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1963 OFFSETOF(struct public_port, oem_cfg_port));
1964 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
1965 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1966 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
1967 val);
1969 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
1970 if (val == OEM_CFG_SCHED_TYPE_ETS)
1971 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
1972 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
1973 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
1974 else
1975 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
1976 val);
1978 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1979 MCP_PF_ID(p_hwfn));
1980 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
1981 p_hwfn->ufp_info.tc = (u8)val;
1982 val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
1983 OEM_CFG_FUNC_HOST_PRI_CTRL);
1984 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
1985 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
1986 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
1987 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
1988 else
1989 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
1990 val);
1992 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1993 "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
1994 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1995 p_hwfn->ufp_info.pri_type);
1996 }
1998 static enum _ecore_status_t
1999 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2000 {
2001 ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
2003 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
2004 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
2005 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
2007 ecore_qm_reconf(p_hwfn, p_ptt);
2008 } else {
2009 /* Merge UFP TC with the dcbx TC data */
2010 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2011 ECORE_DCBX_OPERATIONAL_MIB);
2012 }
2014 /* update storm FW with negotiation results */
2015 ecore_sp_pf_update_ufp(p_hwfn);
2017 return ECORE_SUCCESS;
2018 }
2020 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
2021 struct ecore_ptt *p_ptt)
2023 struct ecore_mcp_info *info = p_hwfn->mcp_info;
2024 enum _ecore_status_t rc = ECORE_SUCCESS;
2025 bool found = false;
2026 u16 i;
2028 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
2030 /* Read Messages from MFW */
2031 ecore_mcp_read_mb(p_hwfn, p_ptt);
2033 /* Compare current messages to old ones */
2034 for (i = 0; i < info->mfw_mb_length; i++) {
2035 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
2036 continue;
2038 found = true;
2040 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2041 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
2042 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2044 switch (i) {
2045 case MFW_DRV_MSG_LINK_CHANGE:
2046 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
2047 break;
2048 case MFW_DRV_MSG_VF_DISABLED:
2049 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
2050 break;
2051 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2052 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2053 ECORE_DCBX_REMOTE_LLDP_MIB);
2054 break;
2055 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2056 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2057 ECORE_DCBX_REMOTE_MIB);
2058 break;
2059 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2060 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2061 ECORE_DCBX_OPERATIONAL_MIB);
2062 /* clear the user-config cache */
2063 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
2064 sizeof(struct ecore_dcbx_set));
2065 break;
2066 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
2067 ecore_lldp_mib_update_event(p_hwfn, p_ptt);
2068 break;
2069 case MFW_DRV_MSG_OEM_CFG_UPDATE:
2070 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
2071 break;
2072 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2073 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2074 break;
2075 case MFW_DRV_MSG_ERROR_RECOVERY:
2076 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2077 break;
2078 case MFW_DRV_MSG_GET_LAN_STATS:
2079 case MFW_DRV_MSG_GET_FCOE_STATS:
2080 case MFW_DRV_MSG_GET_ISCSI_STATS:
2081 case MFW_DRV_MSG_GET_RDMA_STATS:
2082 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2083 break;
2084 case MFW_DRV_MSG_BW_UPDATE:
2085 ecore_mcp_update_bw(p_hwfn, p_ptt);
2086 break;
2087 case MFW_DRV_MSG_S_TAG_UPDATE:
2088 ecore_mcp_update_stag(p_hwfn, p_ptt);
2089 break;
2090 case MFW_DRV_MSG_FAILURE_DETECTED:
2091 ecore_mcp_handle_fan_failure(p_hwfn);
2092 break;
2093 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2094 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2095 break;
2096 default:
2097 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2098 rc = ECORE_INVAL;
2099 }
2100 }
2102 /* ACK everything */
2103 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2104 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2106 /* The MFW expects the answer in BE, so force the write in that format */
2107 ecore_wr(p_hwfn, p_ptt,
2108 info->mfw_mb_addr + sizeof(u32) +
2109 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2110 sizeof(u32) + i * sizeof(u32), val);
2111 }
2113 if (!found) {
2114 DP_NOTICE(p_hwfn, false,
2115 "Received an MFW message indication but no"
2116 " new message!\n");
2117 rc = ECORE_INVAL;
2118 }
2120 /* Copy the new mfw messages into the shadow */
2121 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2126 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2127 struct ecore_ptt *p_ptt,
2128 u32 *p_mfw_ver,
2129 u32 *p_running_bundle_id)
2130 {
2131 u32 global_offsize;
2133 #ifndef ASIC_ONLY
2134 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2135 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2136 return ECORE_SUCCESS;
2137 }
2138 #endif
2140 if (IS_VF(p_hwfn->p_dev)) {
2141 if (p_hwfn->vf_iov_info) {
2142 struct pfvf_acquire_resp_tlv *p_resp;
2144 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2145 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2146 return ECORE_SUCCESS;
2147 }
2148 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2149 "VF requested MFW version prior to ACQUIRE\n");
2151 return ECORE_INVAL;
2152 }
2154 global_offsize = ecore_rd(p_hwfn, p_ptt,
2155 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
2156 public_base,
2157 PUBLIC_GLOBAL));
2158 *p_mfw_ver =
2159 ecore_rd(p_hwfn, p_ptt,
2160 SECTION_ADDR(global_offsize,
2161 0) + OFFSETOF(struct public_global, mfw_ver));
2163 if (p_running_bundle_id != OSAL_NULL) {
2164 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2165 SECTION_ADDR(global_offsize,
2166 0) +
2167 OFFSETOF(struct public_global,
2168 running_bundle_id));
2169 }
2171 return ECORE_SUCCESS;
2172 }
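/* Usage sketch (editorial addition, illustrative only): the 32-bit MFW
 * version packs four byte-wide fields, most significant byte first (an
 * assumption based on how "a.b.c.d" version strings are rendered by the
 * upper driver):
 *
 *	u32 mfw_ver;
 *
 *	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver, OSAL_NULL);
 *	DP_INFO(p_hwfn, "MFW %d.%d.%d.%d\n",
 *		(mfw_ver >> 24) & 0xff, (mfw_ver >> 16) & 0xff,
 *		(mfw_ver >> 8) & 0xff, mfw_ver & 0xff);
 */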
2174 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2175 struct ecore_ptt *p_ptt,
2176 u32 *p_media_type)
2177 {
2178 enum _ecore_status_t rc = ECORE_SUCCESS;
2180 /* TODO - Add support for VFs */
2181 if (IS_VF(p_hwfn->p_dev))
2182 return ECORE_INVAL;
2184 if (!ecore_mcp_is_init(p_hwfn)) {
2185 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2186 return ECORE_BUSY;
2187 }
2189 if (!p_ptt) {
2190 *p_media_type = MEDIA_UNSPECIFIED;
2191 rc = ECORE_INVAL;
2192 } else {
2193 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2194 p_hwfn->mcp_info->port_addr +
2195 OFFSETOF(struct public_port,
2196 media_type));
2197 }
2199 return ECORE_SUCCESS;
2200 }
2202 enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
2203 struct ecore_ptt *p_ptt,
2204 u32 *p_transceiver_state,
2205 u32 *p_transceiver_type)
2207 u32 transceiver_info;
2208 enum _ecore_status_t rc = ECORE_SUCCESS;
2210 /* TODO - Add support for VFs */
2211 if (IS_VF(p_hwfn->p_dev))
2212 return ECORE_INVAL;
2214 if (!ecore_mcp_is_init(p_hwfn)) {
2215 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2216 return ECORE_BUSY;
2217 }
2219 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2220 *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
2222 transceiver_info = ecore_rd(p_hwfn, p_ptt,
2223 p_hwfn->mcp_info->port_addr +
2224 OFFSETOF(struct public_port,
2225 transceiver_data));
2227 *p_transceiver_state = GET_MFW_FIELD(transceiver_info,
2228 ETH_TRANSCEIVER_STATE);
2230 if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) {
2231 *p_transceiver_type = GET_MFW_FIELD(transceiver_info,
2232 ETH_TRANSCEIVER_TYPE);
2233 } else {
2234 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2235 }
2237 return rc;
2238 }
2240 static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
2241 {
2242 if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2243 ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2244 (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2245 return 1;
2247 return 0;
2248 }
2250 enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
2251 struct ecore_ptt *p_ptt,
2252 u32 *p_speed_mask)
2253 {
2254 u32 transceiver_type, transceiver_state;
2256 ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2257 &transceiver_type);
2260 if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
2261 return ECORE_INVAL;
2263 switch (transceiver_type) {
2264 case ETH_TRANSCEIVER_TYPE_1G_LX:
2265 case ETH_TRANSCEIVER_TYPE_1G_SX:
2266 case ETH_TRANSCEIVER_TYPE_1G_PCC:
2267 case ETH_TRANSCEIVER_TYPE_1G_ACC:
2268 case ETH_TRANSCEIVER_TYPE_1000BASET:
2269 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2270 break;
2272 case ETH_TRANSCEIVER_TYPE_10G_SR:
2273 case ETH_TRANSCEIVER_TYPE_10G_LR:
2274 case ETH_TRANSCEIVER_TYPE_10G_LRM:
2275 case ETH_TRANSCEIVER_TYPE_10G_ER:
2276 case ETH_TRANSCEIVER_TYPE_10G_PCC:
2277 case ETH_TRANSCEIVER_TYPE_10G_ACC:
2278 case ETH_TRANSCEIVER_TYPE_4x10G:
2279 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2280 break;
2282 case ETH_TRANSCEIVER_TYPE_40G_LR4:
2283 case ETH_TRANSCEIVER_TYPE_40G_SR4:
2284 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2285 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2286 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2287 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2288 break;
2290 case ETH_TRANSCEIVER_TYPE_100G_AOC:
2291 case ETH_TRANSCEIVER_TYPE_100G_SR4:
2292 case ETH_TRANSCEIVER_TYPE_100G_LR4:
2293 case ETH_TRANSCEIVER_TYPE_100G_ER4:
2294 case ETH_TRANSCEIVER_TYPE_100G_ACC:
2295 *p_speed_mask =
2296 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2297 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2298 break;
2300 case ETH_TRANSCEIVER_TYPE_25G_SR:
2301 case ETH_TRANSCEIVER_TYPE_25G_LR:
2302 case ETH_TRANSCEIVER_TYPE_25G_AOC:
2303 case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2304 case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2305 case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2306 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2307 break;
2309 case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2310 case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2311 case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2312 case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2313 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2314 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2315 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2316 break;
2318 case ETH_TRANSCEIVER_TYPE_40G_CR4:
2319 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2320 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2321 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2322 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2323 break;
2325 case ETH_TRANSCEIVER_TYPE_100G_CR4:
2326 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2327 *p_speed_mask =
2328 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2329 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2330 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2331 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2332 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2333 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2334 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2335 break;
2337 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2338 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2339 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2340 *p_speed_mask =
2341 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2342 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2343 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2344 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2345 break;
2347 case ETH_TRANSCEIVER_TYPE_XLPPI:
2348 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2349 break;
2351 case ETH_TRANSCEIVER_TYPE_10G_BASET:
2352 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2353 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2354 break;
2356 default:
2357 DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2358 transceiver_type);
2359 *p_speed_mask = 0xff;
2360 break;
2361 }
2363 return ECORE_SUCCESS;
2364 }
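/* Usage sketch (editorial addition, illustrative only): the mask computed
 * above is meant to be ANDed with the NVM-configured speed capabilities
 * before advertising link speeds; the field names below are hypothetical:
 *
 *	u32 speed_mask;
 *
 *	if (ecore_mcp_trans_speed_mask(p_hwfn, p_ptt, &speed_mask) ==
 *	    ECORE_SUCCESS)
 *		params->speed.advertised_speeds &= speed_mask;
 */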
2366 enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
2367 struct ecore_ptt *p_ptt,
2368 u32 *p_board_config)
2370 u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2371 enum _ecore_status_t rc = ECORE_SUCCESS;
2373 /* TODO - Add support for VFs */
2374 if (IS_VF(p_hwfn->p_dev))
2375 return ECORE_INVAL;
2377 if (!ecore_mcp_is_init(p_hwfn)) {
2378 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2379 return ECORE_BUSY;
2380 }
2381 if (!p_ptt) {
2382 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2383 rc = ECORE_INVAL;
2384 } else {
2385 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
2386 MISC_REG_GEN_PURP_CR0);
2387 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
2388 nvm_cfg_addr + 4);
2389 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2390 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2391 *p_board_config = ecore_rd(p_hwfn, p_ptt,
2392 port_cfg_addr +
2393 OFFSETOF(struct nvm_cfg1_port,
2394 board_cfg));
2395 }
2397 return rc;
2398 }
2401 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2402 static void
2403 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2404 enum ecore_pci_personality *p_proto)
2405 {
2406 *p_proto = ECORE_PCI_ETH;
2408 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2409 "According to Legacy capabilities, L2 personality is %08x\n",
2410 (u32)*p_proto);
2411 }
2414 static enum _ecore_status_t
2415 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2416 struct ecore_ptt *p_ptt,
2417 enum ecore_pci_personality *p_proto)
2419 u32 resp = 0, param = 0;
2420 enum _ecore_status_t rc;
2422 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2423 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2424 (u32)*p_proto, resp, param);
2425 return ECORE_SUCCESS;
2428 static enum _ecore_status_t
2429 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2430 struct public_func *p_info,
2431 struct ecore_ptt *p_ptt,
2432 enum ecore_pci_personality *p_proto)
2434 enum _ecore_status_t rc = ECORE_SUCCESS;
2436 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2437 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2438 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2439 ECORE_SUCCESS)
2440 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2441 break;
2442 default:
2443 rc = ECORE_INVAL;
2444 }
2446 return rc;
2447 }
2449 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2450 struct ecore_ptt *p_ptt)
2452 struct ecore_mcp_function_info *info;
2453 struct public_func shmem_info;
2455 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2456 info = &p_hwfn->mcp_info->func_info;
2458 info->pause_on_host = (shmem_info.config &
2459 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2461 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2462 &info->protocol) != ECORE_SUCCESS) {
2463 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2464 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2465 return ECORE_INVAL;
2466 }
2468 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2470 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2471 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2472 info->mac[1] = (u8)(shmem_info.mac_upper);
2473 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2474 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2475 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2476 info->mac[5] = (u8)(shmem_info.mac_lower);
2477 } else {
2478 /* TODO - are there protocols for which there's no MAC? */
2479 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2480 }
2482 /* TODO - are these calculations true for BE machine? */
2483 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2484 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2485 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2486 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2488 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2490 info->mtu = (u16)shmem_info.mtu_size;
2497 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2498 "Read configuration from shmem: pause_on_host %02x"
2499 " protocol %02x BW [%02x - %02x]"
2500 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
2501 " node %lx ovlan %04x\n",
2502 info->pause_on_host, info->protocol,
2503 info->bandwidth_min, info->bandwidth_max,
2504 info->mac[0], info->mac[1], info->mac[2],
2505 info->mac[3], info->mac[4], info->mac[5],
2506 (unsigned long)info->wwn_port,
2507 (unsigned long)info->wwn_node, info->ovlan);
2509 return ECORE_SUCCESS;
2512 struct ecore_mcp_link_params
2513 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2515 if (!p_hwfn || !p_hwfn->mcp_info)
2516 return OSAL_NULL;
2517 return &p_hwfn->mcp_info->link_input;
2518 }
2520 struct ecore_mcp_link_state
2521 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2523 if (!p_hwfn || !p_hwfn->mcp_info)
2524 return OSAL_NULL;
2526 #ifndef ASIC_ONLY
2527 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2528 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2529 p_hwfn->mcp_info->link_output.link_up = true;
2530 }
2531 #endif
2533 return &p_hwfn->mcp_info->link_output;
2534 }
2536 struct ecore_mcp_link_capabilities
2537 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2539 if (!p_hwfn || !p_hwfn->mcp_info)
2540 return OSAL_NULL;
2541 return &p_hwfn->mcp_info->link_capabilities;
2542 }
2544 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2545 struct ecore_ptt *p_ptt)
2547 u32 resp = 0, param = 0;
2548 enum _ecore_status_t rc;
2550 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2551 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2553 /* Wait for the drain to complete before returning */
2554 OSAL_MSLEEP(1020);
2556 return rc;
2557 }
2559 const struct ecore_mcp_function_info
2560 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2562 if (!p_hwfn || !p_hwfn->mcp_info)
2563 return OSAL_NULL;
2564 return &p_hwfn->mcp_info->func_info;
2565 }
2567 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2568 struct ecore_ptt *p_ptt, u32 personalities)
2570 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2571 struct public_func shmem_info;
2572 int i, count = 0, num_pfs;
2574 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2576 for (i = 0; i < num_pfs; i++) {
2577 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2578 MCP_PF_ID_BY_REL(p_hwfn, i));
2579 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2580 continue;
2582 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2583 &protocol) != ECORE_SUCCESS)
2584 continue;
2587 if ((1 << ((u32)protocol)) & personalities)
2588 count++;
2589 }
2591 return count;
2592 }
2594 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2595 struct ecore_ptt *p_ptt,
2596 u32 *p_flash_size)
2597 {
2598 u32 flash_size;
2600 #ifndef ASIC_ONLY
2601 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2602 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2603 return ECORE_INVAL;
2604 }
2605 #endif
2607 if (IS_VF(p_hwfn->p_dev))
2608 return ECORE_INVAL;
2610 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2611 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2612 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2613 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2615 *p_flash_size = flash_size;
2617 return ECORE_SUCCESS;
2618 }
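/* Worked example (editorial addition): the NVM_CFG4 field encodes the flash
 * size as a power-of-two number of Mbits, and MCP_BYTES_PER_MBIT_OFFSET (17)
 * converts Mbits to bytes since 1 Mbit = 2^17 bytes. A field value of 3
 * therefore yields 1 << (3 + 17) = 0x100000 bytes, i.e. an 8 Mbit (1 MiB)
 * flash part.
 */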
2620 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2621 struct ecore_ptt *p_ptt)
2623 struct ecore_dev *p_dev = p_hwfn->p_dev;
2625 if (p_dev->recov_in_prog) {
2626 DP_NOTICE(p_hwfn, false,
2627 "Avoid triggering a recovery since such a process"
2628 " is already in progress\n");
2632 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2633 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2635 return ECORE_SUCCESS;
2638 static enum _ecore_status_t
2639 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2640 struct ecore_ptt *p_ptt,
2641 u8 vf_id, u8 num)
2642 {
2643 u32 resp = 0, param = 0, rc_param = 0;
2644 enum _ecore_status_t rc;
2646 /* Only Leader can configure MSIX, and need to take CMT into account */
2648 if (!IS_LEAD_HWFN(p_hwfn))
2649 return ECORE_SUCCESS;
2650 num *= p_hwfn->p_dev->num_hwfns;
2652 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2653 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2654 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2655 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2657 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2658 &resp, &rc_param);
2660 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2661 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2662 vf_id);
2663 rc = ECORE_INVAL;
2664 } else {
2665 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2666 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2667 num, vf_id);
2668 }
2670 return rc;
2671 }
2673 static enum _ecore_status_t
2674 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2675 struct ecore_ptt *p_ptt,
2676 u8 num)
2677 {
2678 u32 resp = 0, param = num, rc_param = 0;
2679 enum _ecore_status_t rc;
2681 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2682 param, &resp, &rc_param);
2684 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2685 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2688 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2689 "Requested 0x%02x MSI-x interrupts for VFs\n",
2696 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2697 struct ecore_ptt *p_ptt,
2698 u8 vf_id, u8 num)
2699 {
2700 if (ECORE_IS_BB(p_hwfn->p_dev))
2701 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2702 else
2703 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2704 }
2706 enum _ecore_status_t
2707 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2708 struct ecore_mcp_drv_version *p_ver)
2710 struct ecore_mcp_mb_params mb_params;
2711 struct drv_version_stc drv_version;
2712 u32 num_words, i;
2713 void *p_name;
2714 OSAL_BE32 val;
2715 enum _ecore_status_t rc;
2717 #ifndef ASIC_ONLY
2718 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2719 return ECORE_SUCCESS;
2720 #endif
2722 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2723 drv_version.version = p_ver->version;
2724 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2725 for (i = 0; i < num_words; i++) {
2726 /* The driver name is expected to be in a big-endian format */
2727 p_name = &p_ver->name[i * sizeof(u32)];
2728 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2729 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2732 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2733 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2734 mb_params.p_data_src = &drv_version;
2735 mb_params.data_src_size = sizeof(drv_version);
2736 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2737 if (rc != ECORE_SUCCESS)
2738 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
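/* Usage sketch (editorial addition, illustrative only; the version encoding
 * and name are made up): a caller fills struct ecore_mcp_drv_version and
 * lets the routine above byte-swap the name into the big-endian layout the
 * MFW expects:
 *
 *	struct ecore_mcp_drv_version drv_ver;
 *
 *	OSAL_MEM_ZERO(&drv_ver, sizeof(drv_ver));
 *	drv_ver.version = 0x08020000;
 *	OSAL_MEMCPY(drv_ver.name, "rte_qede", 9);
 *	ecore_mcp_send_drv_version(p_hwfn, p_ptt, &drv_ver);
 */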
2743 /* A maximal 100 msec waiting time for the MCP to halt */
2744 #define ECORE_MCP_HALT_SLEEP_MS 10
2745 #define ECORE_MCP_HALT_MAX_RETRIES 10
2747 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2748 struct ecore_ptt *p_ptt)
2750 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2751 enum _ecore_status_t rc;
2753 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2754 &param);
2755 if (rc != ECORE_SUCCESS) {
2756 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2757 return rc;
2758 }
2760 do {
2761 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2762 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2763 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2764 break;
2765 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2767 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2768 DP_NOTICE(p_hwfn, false,
2769 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2770 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2771 return ECORE_BUSY;
2772 }
2774 ecore_mcp_cmd_set_blocking(p_hwfn, true);
2776 return ECORE_SUCCESS;
2779 #define ECORE_MCP_RESUME_SLEEP_MS 10
2781 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2782 struct ecore_ptt *p_ptt)
2784 u32 cpu_mode, cpu_state;
2786 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2788 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2789 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2790 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2792 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2793 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2795 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2796 DP_NOTICE(p_hwfn, false,
2797 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2798 cpu_mode, cpu_state);
2799 return ECORE_BUSY;
2800 }
2802 ecore_mcp_cmd_set_blocking(p_hwfn, false);
2804 return ECORE_SUCCESS;
2805 }
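/* Usage sketch (editorial addition, illustrative only): halt/resume are
 * intended to bracket a critical section that must run while the management
 * CPU is quiesced; note ecore_mcp_halt() flips the mailbox into blocking
 * mode via ecore_mcp_cmd_set_blocking(), and ecore_mcp_resume() restores it:
 *
 *	if (ecore_mcp_halt(p_hwfn, p_ptt) != ECORE_SUCCESS)
 *		return ECORE_BUSY;
 *	... critical section ...
 *	ecore_mcp_resume(p_hwfn, p_ptt);
 */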
2807 enum _ecore_status_t
2808 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2809 struct ecore_ptt *p_ptt,
2810 enum ecore_ov_client client)
2812 u32 resp = 0, param = 0;
2813 u32 drv_mb_param;
2814 enum _ecore_status_t rc;
2816 switch (client) {
2817 case ECORE_OV_CLIENT_DRV:
2818 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2819 break;
2820 case ECORE_OV_CLIENT_USER:
2821 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2822 break;
2823 case ECORE_OV_CLIENT_VENDOR_SPEC:
2824 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2825 break;
2826 default:
2827 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2828 return ECORE_INVAL;
2829 }
2831 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2832 drv_mb_param, &resp, &param);
2833 if (rc != ECORE_SUCCESS)
2834 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2839 enum _ecore_status_t
2840 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2841 struct ecore_ptt *p_ptt,
2842 enum ecore_ov_driver_state drv_state)
2844 u32 resp = 0, param = 0;
2845 u32 drv_mb_param;
2846 enum _ecore_status_t rc;
2848 switch (drv_state) {
2849 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2850 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2851 break;
2852 case ECORE_OV_DRIVER_STATE_DISABLED:
2853 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2854 break;
2855 case ECORE_OV_DRIVER_STATE_ACTIVE:
2856 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2857 break;
2858 default:
2859 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2860 return ECORE_INVAL;
2861 }
2863 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2864 drv_mb_param, &resp, &param);
2865 if (rc != ECORE_SUCCESS)
2866 DP_ERR(p_hwfn, "Failed to send driver state\n");
2871 enum _ecore_status_t
2872 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2873 struct ecore_fc_npiv_tbl *p_table)
2874 {
2875 return 0;
2876 }
2878 enum _ecore_status_t
2879 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2880 u16 mtu)
2881 {
2882 u32 resp = 0, param = 0, drv_mb_param = 0;
2883 enum _ecore_status_t rc;
2885 SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu);
2886 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2887 drv_mb_param, &resp, &param);
2888 if (rc != ECORE_SUCCESS)
2889 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2894 enum _ecore_status_t
2895 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2896 u8 *mac)
2897 {
2898 struct ecore_mcp_mb_params mb_params;
2899 union drv_union_data union_data;
2900 enum _ecore_status_t rc;
2902 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2903 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2904 SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE,
2905 DRV_MSG_CODE_VMAC_TYPE_MAC);
2906 mb_params.param |= MCP_PF_ID(p_hwfn);
2907 OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN);
2908 mb_params.p_data_src = &union_data;
2909 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2910 if (rc != ECORE_SUCCESS)
2911 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2916 enum _ecore_status_t
2917 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2918 enum ecore_ov_eswitch eswitch)
2920 enum _ecore_status_t rc;
2921 u32 resp = 0, param = 0;
2922 u32 drv_mb_param;
2924 switch (eswitch) {
2925 case ECORE_OV_ESWITCH_NONE:
2926 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2927 break;
2928 case ECORE_OV_ESWITCH_VEB:
2929 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2930 break;
2931 case ECORE_OV_ESWITCH_VEPA:
2932 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2933 break;
2934 default:
2935 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2936 return ECORE_INVAL;
2937 }
2939 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2940 drv_mb_param, &resp, &param);
2941 if (rc != ECORE_SUCCESS)
2942 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2947 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2948 struct ecore_ptt *p_ptt,
2949 enum ecore_led_mode mode)
2951 u32 resp = 0, param = 0, drv_mb_param;
2952 enum _ecore_status_t rc;
2954 switch (mode) {
2955 case ECORE_LED_MODE_ON:
2956 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2957 break;
2958 case ECORE_LED_MODE_OFF:
2959 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2960 break;
2961 case ECORE_LED_MODE_RESTORE:
2962 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2963 break;
2964 default:
2965 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2966 return ECORE_INVAL;
2967 }
2969 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2970 drv_mb_param, &resp, ¶m);
2971 if (rc != ECORE_SUCCESS)
2972 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2977 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2978 struct ecore_ptt *p_ptt,
2979 u32 mask_parities)
2980 {
2981 u32 resp = 0, param = 0;
2982 enum _ecore_status_t rc;
2984 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2985 mask_parities, &resp, &param);
2987 if (rc != ECORE_SUCCESS) {
2988 DP_ERR(p_hwfn,
2989 "MCP response failure for mask parities, aborting\n");
2990 } else if (resp != FW_MSG_CODE_OK) {
2991 DP_ERR(p_hwfn,
2992 "MCP did not ack mask parity request. Old MFW?\n");
2993 rc = ECORE_INVAL;
2994 }
2996 return rc;
2997 }
2999 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
3000 u8 *p_buf, u32 len)
3001 {
3002 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3003 u32 bytes_left, offset, bytes_to_copy, buf_size;
3004 u32 nvm_offset, resp, param;
3005 struct ecore_ptt *p_ptt;
3006 enum _ecore_status_t rc = ECORE_SUCCESS;
3008 p_ptt = ecore_ptt_acquire(p_hwfn);
3009 if (!p_ptt)
3010 return ECORE_BUSY;
3012 bytes_left = len;
3013 offset = 0;
3014 while (bytes_left > 0) {
3015 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3016 MCP_DRV_NVM_BUF_LEN);
3017 nvm_offset = (addr + offset) | (bytes_to_copy <<
3018 DRV_MB_PARAM_NVM_LEN_OFFSET);
3019 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3020 DRV_MSG_CODE_NVM_READ_NVRAM,
3021 nvm_offset, &resp, &param, &buf_size,
3022 (u32 *)(p_buf + offset));
3023 if (rc != ECORE_SUCCESS) {
3024 DP_NOTICE(p_dev, false,
3025 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
3027 resp = FW_MSG_CODE_ERROR;
3031 if (resp != FW_MSG_CODE_NVM_OK) {
3032 DP_NOTICE(p_dev, false,
3033 "nvm read failed, resp = 0x%08x\n", resp);
3034 rc = ECORE_UNKNOWN_ERROR;
3035 break;
3036 }
3038 /* This can be a lengthy process, and it's possible scheduler
3039 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3040 */
3041 if (bytes_left % 0x1000 <
3042 (bytes_left - buf_size) % 0x1000)
3043 OSAL_MSLEEP(1);
3045 offset += buf_size;
3046 bytes_left -= buf_size;
3047 }
3049 p_dev->mcp_nvm_resp = resp;
3050 ecore_ptt_release(p_hwfn, p_ptt);
3052 return rc;
3053 }
3055 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
3056 u32 addr, u8 *p_buf, u32 *p_len)
3058 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3059 struct ecore_ptt *p_ptt;
3061 enum _ecore_status_t rc;
3063 p_ptt = ecore_ptt_acquire(p_hwfn);
3067 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3068 (cmd == ECORE_PHY_CORE_READ) ?
3069 DRV_MSG_CODE_PHY_CORE_READ :
3070 DRV_MSG_CODE_PHY_RAW_READ,
3071 addr, &resp, &param, p_len, (u32 *)p_buf);
3072 if (rc != ECORE_SUCCESS)
3073 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3075 p_dev->mcp_nvm_resp = resp;
3076 ecore_ptt_release(p_hwfn, p_ptt);
3081 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
3083 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3084 struct ecore_ptt *p_ptt;
3086 p_ptt = ecore_ptt_acquire(p_hwfn);
3090 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
3091 ecore_ptt_release(p_hwfn, p_ptt);
3093 return ECORE_SUCCESS;
3096 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
3098 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3099 struct ecore_ptt *p_ptt;
3101 enum _ecore_status_t rc;
3103 p_ptt = ecore_ptt_acquire(p_hwfn);
3106 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
3108 p_dev->mcp_nvm_resp = resp;
3109 ecore_ptt_release(p_hwfn, p_ptt);
3114 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
3117 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3118 struct ecore_ptt *p_ptt;
3120 enum _ecore_status_t rc;
3122 p_ptt = ecore_ptt_acquire(p_hwfn);
3125 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
3127 p_dev->mcp_nvm_resp = resp;
3128 ecore_ptt_release(p_hwfn, p_ptt);
3133 /* rc defaults to ECORE_INVAL because the while loop below is never
3134 * entered when len is 0.
3135 */
3136 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3137 u32 addr, u8 *p_buf, u32 len)
3139 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
3140 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3141 enum _ecore_status_t rc = ECORE_INVAL;
3142 struct ecore_ptt *p_ptt;
3144 p_ptt = ecore_ptt_acquire(p_hwfn);
3145 if (!p_ptt)
3146 return ECORE_BUSY;
3148 switch (cmd) {
3149 case ECORE_PUT_FILE_DATA:
3150 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3151 break;
3152 case ECORE_NVM_WRITE_NVRAM:
3153 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3154 break;
3155 case ECORE_EXT_PHY_FW_UPGRADE:
3156 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3157 break;
3158 default:
3159 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3160 cmd);
3161 rc = ECORE_INVAL;
3162 goto out;
3163 }
3165 buf_idx = 0;
3166 while (buf_idx < len) {
3167 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3168 MCP_DRV_NVM_BUF_LEN);
3169 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
3170 (addr + buf_idx));
3172 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3173 &resp, &param, buf_size,
3174 (u32 *)&p_buf[buf_idx]);
3175 if (rc != ECORE_SUCCESS) {
3176 DP_NOTICE(p_dev, false,
3177 "ecore_mcp_nvm_write() failed, rc = %d\n",
3179 resp = FW_MSG_CODE_ERROR;
3183 if (resp != FW_MSG_CODE_OK &&
3184 resp != FW_MSG_CODE_NVM_OK &&
3185 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3186 DP_NOTICE(p_dev, false,
3187 "nvm write failed, resp = 0x%08x\n", resp);
3188 rc = ECORE_UNKNOWN_ERROR;
3189 break;
3190 }
3192 /* This can be a lengthy process, and it's possible scheduler
3193 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3194 */
3195 if (buf_idx % 0x1000 >
3196 (buf_idx + buf_size) % 0x1000)
3197 OSAL_MSLEEP(1);
3199 buf_idx += buf_size;
3200 }
3202 p_dev->mcp_nvm_resp = resp;
3203 out:
3204 ecore_ptt_release(p_hwfn, p_ptt);
3206 return rc;
3207 }
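/* Usage sketch (editorial addition, illustrative only; buffer names are
 * assumptions): a whole NVM "put file" flow pairs the begin command with
 * chunked data writes, the chunking into MCP_DRV_NVM_BUF_LEN pieces being
 * handled by the loop above:
 *
 *	rc = ecore_mcp_nvm_put_file_begin(p_dev, file_addr);
 *	if (rc == ECORE_SUCCESS)
 *		rc = ecore_mcp_nvm_write(p_dev, ECORE_PUT_FILE_DATA,
 *					 file_addr, image_buf, image_len);
 */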
3209 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3210 u32 addr, u8 *p_buf, u32 len)
3212 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3213 struct ecore_ptt *p_ptt;
3214 u32 resp, param, nvm_cmd;
3215 enum _ecore_status_t rc;
3217 p_ptt = ecore_ptt_acquire(p_hwfn);
3221 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
3222 DRV_MSG_CODE_PHY_RAW_WRITE;
3223 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
3224 &resp, ¶m, len, (u32 *)p_buf);
3225 if (rc != ECORE_SUCCESS)
3226 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3227 p_dev->mcp_nvm_resp = resp;
3228 ecore_ptt_release(p_hwfn, p_ptt);
3233 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3236 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3237 struct ecore_ptt *p_ptt;
3239 enum _ecore_status_t rc;
3241 p_ptt = ecore_ptt_acquire(p_hwfn);
3245 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
3247 p_dev->mcp_nvm_resp = resp;
3248 ecore_ptt_release(p_hwfn, p_ptt);
3253 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3254 struct ecore_ptt *p_ptt,
3255 u32 port, u32 addr, u32 offset,
3256 u32 len, u8 *p_buf)
3257 {
3258 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
3259 u32 resp, param;
3260 enum _ecore_status_t rc;
3262 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3263 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3265 bytes_left = len;
3267 while (bytes_left > 0) {
3268 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3269 MAX_I2C_TRANSACTION_SIZE);
3270 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3271 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3272 nvm_offset |= ((addr + offset) <<
3273 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3274 nvm_offset |= (bytes_to_copy <<
3275 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3276 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3277 DRV_MSG_CODE_TRANSCEIVER_READ,
3278 nvm_offset, &resp, &param, &buf_size,
3279 (u32 *)(p_buf + offset));
3280 if (rc != ECORE_SUCCESS) {
3281 DP_NOTICE(p_hwfn, false,
3282 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3287 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3288 return ECORE_NODEV;
3289 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3290 return ECORE_UNKNOWN_ERROR;
3292 offset += buf_size;
3293 bytes_left -= buf_size;
3294 }
3296 return ECORE_SUCCESS;
3297 }
3299 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3300 struct ecore_ptt *p_ptt,
3301 u32 port, u32 addr, u32 offset,
3304 u32 buf_idx, buf_size, nvm_offset, resp, param;
3305 enum _ecore_status_t rc;
3307 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3308 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3309 buf_idx = 0;
3310 while (buf_idx < len) {
3311 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3312 MAX_I2C_TRANSACTION_SIZE);
3313 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3314 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3315 nvm_offset |= ((offset + buf_idx) <<
3316 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3317 nvm_offset |= (buf_size <<
3318 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3319 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3320 DRV_MSG_CODE_TRANSCEIVER_WRITE,
3321 nvm_offset, &resp, &param, buf_size,
3322 (u32 *)&p_buf[buf_idx]);
3323 if (rc != ECORE_SUCCESS) {
3324 DP_NOTICE(p_hwfn, false,
3325 "Failed to send a transceiver write command to the MFW. rc = %d.\n",
3330 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3331 return ECORE_NODEV;
3332 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3333 return ECORE_UNKNOWN_ERROR;
3335 buf_idx += buf_size;
3336 }
3338 return ECORE_SUCCESS;
3339 }
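/* Usage sketch (editorial addition, illustrative only): reading the first
 * bytes of a module EEPROM; 0xA0 is the conventional SFP identification
 * page I2C address (an assumption here, not a driver define):
 *
 *	u8 eeprom[32];
 *
 *	rc = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, MFW_PORT(p_hwfn),
 *				    0xA0, 0, 32, eeprom);
 */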
3341 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3342 struct ecore_ptt *p_ptt,
3343 u16 gpio, u32 *gpio_val)
3345 enum _ecore_status_t rc = ECORE_SUCCESS;
3346 u32 drv_mb_param = 0, rsp;
3348 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3350 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3351 drv_mb_param, &rsp, gpio_val);
3353 if (rc != ECORE_SUCCESS)
3354 return rc;
3356 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3357 return ECORE_UNKNOWN_ERROR;
3359 return ECORE_SUCCESS;
3362 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3363 struct ecore_ptt *p_ptt,
3364 u16 gpio, u16 gpio_val)
3366 enum _ecore_status_t rc = ECORE_SUCCESS;
3367 u32 drv_mb_param = 0, param, rsp;
3369 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3370 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3372 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3373 drv_mb_param, &rsp, &param);
3375 if (rc != ECORE_SUCCESS)
3376 return rc;
3378 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3379 return ECORE_UNKNOWN_ERROR;
3381 return ECORE_SUCCESS;
3384 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3385 struct ecore_ptt *p_ptt,
3386 u16 gpio, u32 *gpio_direction,
3389 u32 drv_mb_param = 0, rsp, val = 0;
3390 enum _ecore_status_t rc = ECORE_SUCCESS;
3392 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3394 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3395 drv_mb_param, &rsp, &val);
3396 if (rc != ECORE_SUCCESS)
3397 return rc;
3399 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3400 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3401 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3402 DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3404 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3405 return ECORE_UNKNOWN_ERROR;
3407 return ECORE_SUCCESS;
3410 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3411 struct ecore_ptt *p_ptt)
3413 u32 drv_mb_param = 0, rsp, param;
3414 enum _ecore_status_t rc = ECORE_SUCCESS;
3416 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3417 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3419 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3420 drv_mb_param, &rsp, &param);
3422 if (rc != ECORE_SUCCESS)
3423 return rc;
3425 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3426 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3427 rc = ECORE_UNKNOWN_ERROR;
3429 return rc;
3430 }
3432 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3433 struct ecore_ptt *p_ptt)
3435 u32 drv_mb_param, rsp, param;
3436 enum _ecore_status_t rc = ECORE_SUCCESS;
3438 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3439 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3441 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3442 drv_mb_param, &rsp, &param);
3444 if (rc != ECORE_SUCCESS)
3445 return rc;
3447 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3448 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3449 rc = ECORE_UNKNOWN_ERROR;
3451 return rc;
3452 }
3454 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3455 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3457 u32 drv_mb_param = 0, rsp;
3458 enum _ecore_status_t rc = ECORE_SUCCESS;
3460 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3461 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3463 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3464 drv_mb_param, &rsp, num_images);
3466 if (rc != ECORE_SUCCESS)
3467 return rc;
3469 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3470 rc = ECORE_UNKNOWN_ERROR;
3472 return rc;
3473 }
3475 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3476 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3477 struct bist_nvm_image_att *p_image_att, u32 image_index)
3479 u32 buf_size, nvm_offset, resp, param;
3480 enum _ecore_status_t rc;
3482 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3483 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3484 nvm_offset |= (image_index <<
3485 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3486 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3487 nvm_offset, &resp, &param, &buf_size,
3488 (u32 *)p_image_att);
3489 if (rc != ECORE_SUCCESS)
3490 return rc;
3492 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3493 (p_image_att->return_code != 1))
3494 rc = ECORE_UNKNOWN_ERROR;
3496 return rc;
3497 }
3499 enum _ecore_status_t
3500 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3501 struct ecore_ptt *p_ptt,
3502 struct ecore_temperature_info *p_temp_info)
3504 struct ecore_temperature_sensor *p_temp_sensor;
3505 struct temperature_status_stc mfw_temp_info;
3506 struct ecore_mcp_mb_params mb_params;
3508 enum _ecore_status_t rc;
3511 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3512 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3513 mb_params.p_data_dst = &mfw_temp_info;
3514 mb_params.data_dst_size = sizeof(mfw_temp_info);
3515 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3516 if (rc != ECORE_SUCCESS)
3517 return rc;
3519 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3520 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3521 ECORE_MAX_NUM_OF_SENSORS);
3522 for (i = 0; i < p_temp_info->num_sensors; i++) {
3523 val = mfw_temp_info.sensor[i];
3524 p_temp_sensor = &p_temp_info->sensors[i];
3525 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3526 SENSOR_LOCATION_OFFSET;
3527 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3528 THRESHOLD_HIGH_OFFSET;
3529 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3530 CRITICAL_TEMPERATURE_OFFSET;
3531 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3532 CURRENT_TEMP_OFFSET;
3533 }
3535 return ECORE_SUCCESS;
3536 }
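/* Usage sketch (editorial addition, illustrative only) for consuming the
 * decoded sensor data:
 *
 *	struct ecore_temperature_info ti;
 *	u32 i;
 *
 *	if (ecore_mcp_get_temperature_info(p_hwfn, p_ptt, &ti) ==
 *	    ECORE_SUCCESS)
 *		for (i = 0; i < ti.num_sensors; i++)
 *			DP_INFO(p_hwfn, "sensor %u: %u C\n",
 *				ti.sensors[i].sensor_location,
 *				ti.sensors[i].current_temp);
 */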
3538 enum _ecore_status_t ecore_mcp_get_mba_versions(
3539 struct ecore_hwfn *p_hwfn,
3540 struct ecore_ptt *p_ptt,
3541 struct ecore_mba_vers *p_mba_vers)
3543 u32 buf_size, resp, param;
3544 enum _ecore_status_t rc;
3546 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3547 0, &resp, &param, &buf_size,
3548 &p_mba_vers->mba_vers[0]);
3550 if (rc != ECORE_SUCCESS)
3551 return rc;
3553 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3554 rc = ECORE_UNKNOWN_ERROR;
3556 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3557 rc = ECORE_UNKNOWN_ERROR;
3559 return rc;
3560 }
3562 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3563 struct ecore_ptt *p_ptt,
3564 u64 *num_events)
3565 {
3566 u32 rsp;
3568 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3569 0, &rsp, (u32 *)num_events);
3572 static enum resource_id_enum
3573 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3575 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3577 switch (res_id) {
3578 case ECORE_SB:
3579 mfw_res_id = RESOURCE_NUM_SB_E;
3580 break;
3581 case ECORE_L2_QUEUE:
3582 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3583 break;
3584 case ECORE_VPORT:
3585 mfw_res_id = RESOURCE_NUM_VPORT_E;
3586 break;
3587 case ECORE_RSS_ENG:
3588 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3589 break;
3590 case ECORE_PQ:
3591 mfw_res_id = RESOURCE_NUM_PQ_E;
3592 break;
3593 case ECORE_RL:
3594 mfw_res_id = RESOURCE_NUM_RL_E;
3595 break;
3596 case ECORE_MAC:
3597 case ECORE_VLAN:
3598 /* Each VFC resource can accommodate both a MAC and a VLAN */
3599 mfw_res_id = RESOURCE_VFC_FILTER_E;
3600 break;
3601 case ECORE_ILT:
3602 mfw_res_id = RESOURCE_ILT_E;
3603 break;
3604 case ECORE_LL2_QUEUE:
3605 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3606 break;
3607 case ECORE_RDMA_CNQ_RAM:
3608 case ECORE_CMDQS_CQS:
3609 /* CNQ/CMDQS are the same resource */
3610 mfw_res_id = RESOURCE_CQS_E;
3611 break;
3612 case ECORE_RDMA_STATS_QUEUE:
3613 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3614 break;
3615 case ECORE_BDQ:
3616 mfw_res_id = RESOURCE_BDQ_E;
3617 break;
3618 default:
3619 break;
3620 }
3622 return mfw_res_id;
3623 }
3625 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3626 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3627 #define ECORE_RESC_ALLOC_VERSION \
3628 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3629 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
3630 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3631 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3633 struct ecore_resc_alloc_in_params {
3634 u32 cmd;
3635 enum ecore_resources res_id;
3636 u32 resc_max_val;
3637 };
3639 struct ecore_resc_alloc_out_params {
3640 u32 mcp_resp;
3641 u32 mcp_param;
3642 u32 resc_num;
3643 u32 resc_start;
3644 u32 vf_resc_num;
3645 u32 vf_resc_start;
3646 u32 flags;
3647 };
3649 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
3651 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3653 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3654 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3655 enum _ecore_status_t rc;
3657 /* Allow ongoing PCIe transactions to complete */
3658 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3660 /* Clear the PF's internal FID_enable in the PXP */
3661 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3662 if (rc != ECORE_SUCCESS)
3663 DP_NOTICE(p_hwfn, false,
3664 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
3670 static enum _ecore_status_t
3671 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3672 struct ecore_ptt *p_ptt,
3673 struct ecore_resc_alloc_in_params *p_in_params,
3674 struct ecore_resc_alloc_out_params *p_out_params)
3676 struct ecore_mcp_mb_params mb_params;
3677 struct resource_info mfw_resc_info;
3678 enum _ecore_status_t rc;
3680 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3682 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3683 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3685 "Failed to match resource %d [%s] with the MFW resources\n",
3686 p_in_params->res_id,
3687 ecore_hw_get_resc_name(p_in_params->res_id));
3688 return ECORE_INVAL;
3689 }
3691 switch (p_in_params->cmd) {
3692 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3693 mfw_resc_info.size = p_in_params->resc_max_val;
3694 /* Fallthrough */
3695 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3696 break;
3697 default:
3698 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3699 p_in_params->cmd);
3700 return ECORE_INVAL;
3701 }
3703 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3704 mb_params.cmd = p_in_params->cmd;
3705 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3706 mb_params.p_data_src = &mfw_resc_info;
3707 mb_params.data_src_size = sizeof(mfw_resc_info);
3708 mb_params.p_data_dst = mb_params.p_data_src;
3709 mb_params.data_dst_size = mb_params.data_src_size;
3711 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3712 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3713 p_in_params->cmd, p_in_params->res_id,
3714 ecore_hw_get_resc_name(p_in_params->res_id),
3715 GET_MFW_FIELD(mb_params.param,
3716 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3717 GET_MFW_FIELD(mb_params.param,
3718 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3719 p_in_params->resc_max_val);
3721 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3722 if (rc != ECORE_SUCCESS)
3723 return rc;
3725 p_out_params->mcp_resp = mb_params.mcp_resp;
3726 p_out_params->mcp_param = mb_params.mcp_param;
3727 p_out_params->resc_num = mfw_resc_info.size;
3728 p_out_params->resc_start = mfw_resc_info.offset;
3729 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3730 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3731 p_out_params->flags = mfw_resc_info.flags;
3733 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3734 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3735 GET_MFW_FIELD(p_out_params->mcp_param,
3736 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3737 GET_MFW_FIELD(p_out_params->mcp_param,
3738 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3739 p_out_params->resc_num, p_out_params->resc_start,
3740 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3741 p_out_params->flags);
3743 return ECORE_SUCCESS;
3746 enum _ecore_status_t
3747 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3748 enum ecore_resources res_id, u32 resc_max_val,
3749 u32 *p_mcp_resp)
3750 {
3751 struct ecore_resc_alloc_out_params out_params;
3752 struct ecore_resc_alloc_in_params in_params;
3753 enum _ecore_status_t rc;
3755 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3756 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3757 in_params.res_id = res_id;
3758 in_params.resc_max_val = resc_max_val;
3759 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3760 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3761 &out_params);
3762 if (rc != ECORE_SUCCESS)
3763 return rc;
3765 *p_mcp_resp = out_params.mcp_resp;
3767 return ECORE_SUCCESS;
3770 enum _ecore_status_t
3771 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3772 enum ecore_resources res_id, u32 *p_mcp_resp,
3773 u32 *p_resc_num, u32 *p_resc_start)
3775 struct ecore_resc_alloc_out_params out_params;
3776 struct ecore_resc_alloc_in_params in_params;
3777 enum _ecore_status_t rc;
3779 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3780 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3781 in_params.res_id = res_id;
3782 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3783 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3784 &out_params);
3785 if (rc != ECORE_SUCCESS)
3786 return rc;
3788 *p_mcp_resp = out_params.mcp_resp;
3790 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3791 *p_resc_num = out_params.resc_num;
3792 *p_resc_start = out_params.resc_start;
3793 }
3795 return ECORE_SUCCESS;
3796 }
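/* Usage sketch (editorial addition, illustrative only): querying the
 * MFW-assigned range for a resource, e.g. L2 queues:
 *
 *	u32 mcp_resp, num, start;
 *
 *	rc = ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_L2_QUEUE,
 *				     &mcp_resp, &num, &start);
 *	if (rc == ECORE_SUCCESS && mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		DP_INFO(p_hwfn, "L2 queues: %u starting at %u\n", num, start);
 */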
3798 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3799 struct ecore_ptt *p_ptt)
3801 u32 mcp_resp, mcp_param;
3803 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3804 &mcp_resp, &mcp_param);
3807 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3808 struct ecore_ptt *p_ptt,
3809 u32 param, u32 *p_mcp_resp,
3812 enum _ecore_status_t rc;
3814 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3815 p_mcp_resp, p_mcp_param);
3816 if (rc != ECORE_SUCCESS)
3817 return rc;
3819 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3821 "The resource command is unsupported by the MFW\n");
3822 return ECORE_NOTIMPL;
3825 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3826 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3828 DP_NOTICE(p_hwfn, false,
3829 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3837 enum _ecore_status_t
3838 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3839 struct ecore_resc_lock_params *p_params)
3841 u32 param = 0, mcp_resp, mcp_param;
3842 u8 opcode;
3843 enum _ecore_status_t rc;
3845 switch (p_params->timeout) {
3846 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3847 opcode = RESOURCE_OPCODE_REQ;
3848 p_params->timeout = 0;
3849 break;
3850 case ECORE_MCP_RESC_LOCK_TO_NONE:
3851 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3852 p_params->timeout = 0;
3853 break;
3854 default:
3855 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3856 break;
3857 }
3859 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3860 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3861 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3863 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3864 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3865 param, p_params->timeout, opcode, p_params->resource);
3867 /* Attempt to acquire the resource */
3868 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3869 &mcp_param);
3870 if (rc != ECORE_SUCCESS)
3871 return rc;
3873 /* Analyze the response */
3874 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3875 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3877 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3878 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3879 mcp_param, opcode, p_params->owner);
3881 switch (opcode) {
3882 case RESOURCE_OPCODE_GNT:
3883 p_params->b_granted = true;
3884 break;
3885 case RESOURCE_OPCODE_BUSY:
3886 p_params->b_granted = false;
3887 break;
3888 default:
3889 DP_NOTICE(p_hwfn, false,
3890 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3891 mcp_param, opcode);
3892 return ECORE_INVAL;
3893 }
3895 return ECORE_SUCCESS;
3896 }
3898 enum _ecore_status_t
3899 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3900 struct ecore_resc_lock_params *p_params)
3901 {
3902 u8 retry_cnt = 0;
3903 enum _ecore_status_t rc;
3905 do {
3906 /* No need for an interval before the first iteration */
3907 if (retry_cnt) {
3908 if (p_params->sleep_b4_retry) {
3909 u16 retry_interval_in_ms =
3910 DIV_ROUND_UP(p_params->retry_interval,
3911 1000);
3913 OSAL_MSLEEP(retry_interval_in_ms);
3914 } else {
3915 OSAL_UDELAY(p_params->retry_interval);
3916 }
3917 }
3919 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3920 if (rc != ECORE_SUCCESS)
3921 return rc;
3923 if (p_params->b_granted)
3924 break;
3925 } while (retry_cnt++ < p_params->retry_num);
3927 return ECORE_SUCCESS;
3928 }
3930 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
3931 struct ecore_resc_unlock_params *p_unlock,
3932 enum ecore_resc_lock resource,
3933 bool b_is_permanent)
3935 if (p_lock != OSAL_NULL) {
3936 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
3938 /* Permanent resources don't require aging, and there's no
3939 * point in trying to acquire them more than once since it's
3940 * unexpected another entity would release them.
3942 if (b_is_permanent) {
3943 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
3944 } else {
3945 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3946 p_lock->retry_interval =
3947 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3948 p_lock->sleep_b4_retry = true;
3949 }
3951 p_lock->resource = resource;
3952 }
3954 if (p_unlock != OSAL_NULL) {
3955 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
3956 p_unlock->resource = resource;
3957 }
3958 }
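/* Usage sketch (editorial addition, illustrative only) of the intended
 * lock/unlock pairing:
 *
 *	struct ecore_resc_lock_params lock;
 *	struct ecore_resc_unlock_params unlock;
 *
 *	ecore_mcp_resc_lock_default_init(&lock, &unlock, resource, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock) == ECORE_SUCCESS &&
 *	    lock.b_granted) {
 *		... access the resource shared with other PFs/drivers ...
 *		ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */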
3960 enum _ecore_status_t
3961 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3962 struct ecore_resc_unlock_params *p_params)
3964 u32 param = 0, mcp_resp, mcp_param;
3965 u8 opcode;
3966 enum _ecore_status_t rc;
3968 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3969 : RESOURCE_OPCODE_RELEASE;
3970 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3971 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3973 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3974 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3975 param, opcode, p_params->resource);
3977 /* Attempt to release the resource */
3978 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3979 &mcp_param);
3980 if (rc != ECORE_SUCCESS)
3981 return rc;
3983 /* Analyze the response */
3984 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3986 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3987 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3991 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3993 "Resource unlock request for an already released resource [%d]\n",
3994 p_params->resource);
3996 case RESOURCE_OPCODE_RELEASED:
3997 p_params->b_released = true;
3999 case RESOURCE_OPCODE_WRONG_OWNER:
4000 p_params->b_released = false;
4003 DP_NOTICE(p_hwfn, false,
4004 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
4009 return ECORE_SUCCESS;
4012 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
4014 return !!(p_hwfn->mcp_info->capabilities &
4015 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
4018 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4019 struct ecore_ptt *p_ptt)
4020 {
4021 u32 mcp_resp;
4022 enum _ecore_status_t rc;
4024 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4025 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4026 if (rc == ECORE_SUCCESS)
4027 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4028 "MFW supported features: %08x\n",
4029 p_hwfn->mcp_info->capabilities);
4031 return rc;
4032 }
4034 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4035 struct ecore_ptt *p_ptt)
4037 u32 mcp_resp, mcp_param, features;
4039 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4040 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
4041 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
4043 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4044 features, &mcp_resp, &mcp_param);
4047 enum _ecore_status_t
4048 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4049 struct ecore_mcp_drv_attr *p_drv_attr)
4051 struct attribute_cmd_write_stc attr_cmd_write;
4052 enum _attribute_commands_e mfw_attr_cmd;
4053 struct ecore_mcp_mb_params mb_params;
4054 enum _ecore_status_t rc;
4056 switch (p_drv_attr->attr_cmd) {
4057 case ECORE_MCP_DRV_ATTR_CMD_READ:
4058 mfw_attr_cmd = ATTRIBUTE_CMD_READ;
4059 break;
4060 case ECORE_MCP_DRV_ATTR_CMD_WRITE:
4061 mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
4062 break;
4063 case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
4064 mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
4065 break;
4066 case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
4067 mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
4068 break;
4069 default:
4070 DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
4071 p_drv_attr->attr_cmd);
4072 return ECORE_INVAL;
4073 }
4075 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4076 mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
4077 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
4078 p_drv_attr->attr_num);
4079 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
4080 mfw_attr_cmd);
4081 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
4082 OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
4083 attr_cmd_write.val = p_drv_attr->val;
4084 attr_cmd_write.mask = p_drv_attr->mask;
4085 attr_cmd_write.offset = p_drv_attr->offset;
4087 mb_params.p_data_src = &attr_cmd_write;
4088 mb_params.data_src_size = sizeof(attr_cmd_write);
4091 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4092 if (rc != ECORE_SUCCESS)
4093 return rc;
4095 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4097 "The attribute command is not supported by the MFW\n");
4098 return ECORE_NOTIMPL;
4099 } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
4101 "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
4102 mb_params.mcp_resp, p_drv_attr->attr_cmd,
4103 p_drv_attr->attr_num);
4107 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4108 "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
4109 p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
4110 p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
4111 mb_params.mcp_param);
4113 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
4114 p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
4115 p_drv_attr->val = mb_params.mcp_param;
4117 return ECORE_SUCCESS;
4120 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4121 u32 offset, u32 val)
4123 struct ecore_mcp_mb_params mb_params = {0};
4124 enum _ecore_status_t rc = ECORE_SUCCESS;
4125 u32 dword = val;
4127 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4128 mb_params.param = offset;
4129 mb_params.p_data_src = &dword;
4130 mb_params.data_src_size = sizeof(dword);
4132 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4133 if (rc != ECORE_SUCCESS) {
4134 DP_NOTICE(p_hwfn, false,
4135 "Failed to send the WOL write request, rc = %d\n", rc);
4136 return;
4137 }
4138 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
4139 DP_NOTICE(p_hwfn, false,
4140 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4141 val, offset, mb_params.mcp_resp);
4142 rc = ECORE_UNKNOWN_ERROR;
4143 }