2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23 #include "ecore_sp_commands.h"
24 #include "ecore_cxt.h"
26 #define CHIP_MCP_RESP_ITER_US 10
27 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
29 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
30 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
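/* A worked check of the timeouts above (added for reference; the per-iteration
 * delay is CHIP_MCP_RESP_ITER_US = 10 usec on real silicon):
 *   ECORE_DRV_MB_MAX_RETRIES: 500,000 iterations * 10 usec = 5,000,000 usec = 5 sec
 *   ECORE_MCP_RESET_RETRIES:   50,000 iterations * 10 usec =   500,000 usec = 500 msec
 */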
32 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
33 ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
36 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
37 ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
39 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
40 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
41 OFFSETOF(struct public_drv_mb, _field), _val)
43 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
44 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
45 OFFSETOF(struct public_drv_mb, _field))
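/* Illustrative usage of the accessor macros above (a sketch; these exact
 * patterns appear in the command path later in this file):
 *   DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
 *   DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq_num));
 *   mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
 * Each expands to an ecore_wr()/ecore_rd() access at
 * mcp_info->drv_mb_addr + OFFSETOF(struct public_drv_mb, <field>).
 */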
47 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
48 DRV_ID_PDA_COMP_VER_OFFSET)
50 #define MCP_BYTES_PER_MBIT_OFFSET 17
54 static int loaded_port[MAX_NUM_PORTS] = { 0 };
57 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
59 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
64 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
66 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
68 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
70 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
72 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
73 "port_addr = 0x%x, port_id 0x%02x\n",
74 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
77 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
79 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
84 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
88 if (!p_hwfn->mcp_info->public_base)
91 for (i = 0; i < length; i++) {
92 tmp = ecore_rd(p_hwfn, p_ptt,
93 p_hwfn->mcp_info->mfw_mb_addr +
94 (i << 2) + sizeof(u32));
96 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
97 OSAL_BE32_TO_CPU(tmp);
101 struct ecore_mcp_cmd_elem {
102 osal_list_entry_t list;
103 struct ecore_mcp_mb_params *p_mb_params;
104 u16 expected_seq_num;
108 /* Must be called while cmd_lock is acquired */
109 static struct ecore_mcp_cmd_elem *
110 ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
111 struct ecore_mcp_mb_params *p_mb_params,
112 u16 expected_seq_num)
114 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
116 p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
117 sizeof(*p_cmd_elem));
119 DP_NOTICE(p_hwfn, false,
120 "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
124 p_cmd_elem->p_mb_params = p_mb_params;
125 p_cmd_elem->expected_seq_num = expected_seq_num;
126 OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
131 /* Must be called while cmd_lock is acquired */
132 static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
133 struct ecore_mcp_cmd_elem *p_cmd_elem)
135 OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
136 OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
139 /* Must be called while cmd_lock is acquired */
140 static struct ecore_mcp_cmd_elem *
141 ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
143 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
145 OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
146 struct ecore_mcp_cmd_elem) {
147 if (p_cmd_elem->expected_seq_num == seq_num)
154 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
156 if (p_hwfn->mcp_info) {
157 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
159 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
160 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
162 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
163 OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
164 &p_hwfn->mcp_info->cmd_list, list,
165 struct ecore_mcp_cmd_elem) {
166 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
168 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
170 #ifdef CONFIG_ECORE_LOCK_ALLOC
171 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
172 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
176 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
178 return ECORE_SUCCESS;
181 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
182 struct ecore_ptt *p_ptt)
184 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
185 u32 drv_mb_offsize, mfw_mb_offsize;
186 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
189 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
190 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
191 p_info->public_base = 0;
196 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
197 if (!p_info->public_base)
200 p_info->public_base |= GRCBASE_MCP;
202 /* Calculate the driver and MFW mailbox address */
203 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
204 SECTION_OFFSIZE_ADDR(p_info->public_base,
206 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
207 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
208 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
209 " mcp_pf_id = 0x%x\n",
210 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
212 /* Set the MFW MB address */
213 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
214 SECTION_OFFSIZE_ADDR(p_info->public_base,
216 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
217 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
218 p_info->mfw_mb_addr);
220 /* Get the current driver mailbox sequence before sending the first command */
223 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
224 DRV_MSG_SEQ_NUMBER_MASK;
226 /* Get current FW pulse sequence */
227 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
230 p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
232 return ECORE_SUCCESS;
235 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
236 struct ecore_ptt *p_ptt)
238 struct ecore_mcp_info *p_info;
241 /* Allocate mcp_info structure */
242 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
243 sizeof(*p_hwfn->mcp_info));
244 if (!p_hwfn->mcp_info)
246 p_info = p_hwfn->mcp_info;
248 /* Initialize the MFW spinlocks */
249 #ifdef CONFIG_ECORE_LOCK_ALLOC
250 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
251 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
253 OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
254 OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
256 OSAL_LIST_INIT(&p_info->cmd_list);
258 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
259 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
260 /* Do not free mcp_info here, since public_base indicates that
261 * the MCP is not initialized
263 return ECORE_SUCCESS;
266 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
267 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
268 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
269 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
272 return ECORE_SUCCESS;
275 DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
276 ecore_mcp_free(p_hwfn);
280 static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
281 struct ecore_ptt *p_ptt)
283 u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
285 /* Use MCP history register to check if MCP reset occurred between init time and now */
288 if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
289 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
290 "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
291 p_hwfn->mcp_info->mcp_hist, generic_por_0);
293 ecore_load_mcp_offsets(p_hwfn, p_ptt);
294 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
298 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
299 struct ecore_ptt *p_ptt)
301 u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
302 enum _ecore_status_t rc = ECORE_SUCCESS;
305 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
306 delay = EMUL_MCP_RESP_ITER_US;
309 if (p_hwfn->mcp_info->b_block_cmd) {
310 DP_NOTICE(p_hwfn, false,
311 "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
312 return ECORE_ABORTED;
315 /* Ensure that only a single thread is accessing the mailbox */
316 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
318 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
320 /* Set drv command along with the updated sequence */
321 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
322 seq = ++p_hwfn->mcp_info->drv_mb_seq;
323 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
326 /* Wait for MFW response */
328 /* Give the FW up to 500 msec (50*1000*10usec) */
329 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
330 MISCS_REG_GENERIC_POR_0)) &&
331 (cnt++ < ECORE_MCP_RESET_RETRIES));
333 if (org_mcp_reset_seq !=
334 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
335 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
336 "MCP was reset after %d usec\n", cnt * delay);
338 DP_ERR(p_hwfn, "Failed to reset MCP\n");
342 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
347 /* Must be called while cmd_lock is acquired */
348 static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
350 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
352 /* There is at most one pending command at a certain time, and if it
353 * exists - it is placed at the HEAD of the list.
355 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
356 p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
357 struct ecore_mcp_cmd_elem,
359 return !p_cmd_elem->b_is_completed;
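/* Note (added): ecore_mcp_cmd_add_elem() pushes new elements at the head of
 * cmd_list, which is why checking only the first entry here is sufficient.
 */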
365 /* Must be called while cmd_lock is acquired */
366 static enum _ecore_status_t
367 ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
369 struct ecore_mcp_mb_params *p_mb_params;
370 struct ecore_mcp_cmd_elem *p_cmd_elem;
374 mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
375 seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
377 /* Return if no new non-handled response has been received */
378 if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
381 p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
384 "Failed to find a pending mailbox cmd that expects sequence number %d\n",
386 return ECORE_UNKNOWN_ERROR;
389 p_mb_params = p_cmd_elem->p_mb_params;
391 /* Get the MFW response along with the sequence number */
392 p_mb_params->mcp_resp = mcp_resp;
394 /* Get the MFW param */
395 p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
397 /* Get the union data */
398 if (p_mb_params->p_data_dst != OSAL_NULL &&
399 p_mb_params->data_dst_size) {
400 u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
401 OFFSETOF(struct public_drv_mb,
403 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
404 union_data_addr, p_mb_params->data_dst_size);
407 p_cmd_elem->b_is_completed = true;
409 return ECORE_SUCCESS;
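/* Summary (added): the MFW echoes the driver's sequence number in the low
 * bits of fw_mb_header. If the echoed value does not match the current
 * drv_mb_seq, the response is not yet ours and the caller keeps polling
 * (ECORE_AGAIN); if it matches but no list element expects it, that is an
 * error (ECORE_UNKNOWN_ERROR).
 */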
412 /* Must be called while cmd_lock is acquired */
413 static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
414 struct ecore_ptt *p_ptt,
415 struct ecore_mcp_mb_params *p_mb_params,
418 union drv_union_data union_data;
421 /* Set the union data */
422 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
423 OFFSETOF(struct public_drv_mb, union_data);
424 OSAL_MEM_ZERO(&union_data, sizeof(union_data));
425 if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
426 OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
427 p_mb_params->data_src_size);
428 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
431 /* Set the drv param */
432 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
434 /* Set the drv command along with the sequence number */
435 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
437 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
438 "MFW mailbox: command 0x%08x param 0x%08x\n",
439 (p_mb_params->cmd | seq_num), p_mb_params->param);
442 static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
445 p_hwfn->mcp_info->b_block_cmd = block_cmd;
447 DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
448 block_cmd ? "Block" : "Unblock");
451 void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
452 struct ecore_ptt *p_ptt)
454 u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
456 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
457 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
458 cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
459 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
460 cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
461 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
462 cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
464 DP_NOTICE(p_hwfn, false,
465 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
466 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
469 static enum _ecore_status_t
470 _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
471 struct ecore_mcp_mb_params *p_mb_params,
472 u32 max_retries, u32 delay)
474 struct ecore_mcp_cmd_elem *p_cmd_elem;
477 enum _ecore_status_t rc = ECORE_SUCCESS;
479 /* Wait until the mailbox is non-occupied */
481 /* Exit the loop if there is no pending command, or if the
482 * pending command is completed during this iteration.
483 * The spinlock stays locked until the command is sent.
486 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
488 if (!ecore_mcp_has_pending_cmd(p_hwfn))
491 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
492 if (rc == ECORE_SUCCESS)
494 else if (rc != ECORE_AGAIN)
497 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
499 OSAL_MFW_CMD_PREEMPT(p_hwfn);
500 } while (++cnt < max_retries);
502 if (cnt >= max_retries) {
503 DP_NOTICE(p_hwfn, false,
504 "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
505 p_mb_params->cmd, p_mb_params->param);
509 /* Send the mailbox command */
510 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
511 seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
512 p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
518 __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
519 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
521 /* Wait for the MFW response */
523 /* Exit the loop if the command is already completed, or if the
524 * command is completed during this iteration.
525 * The spinlock stays locked until the list element is removed.
529 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
531 if (p_cmd_elem->b_is_completed)
534 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
535 if (rc == ECORE_SUCCESS)
537 else if (rc != ECORE_AGAIN)
540 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
541 OSAL_MFW_CMD_PREEMPT(p_hwfn);
542 } while (++cnt < max_retries);
544 if (cnt >= max_retries) {
545 DP_NOTICE(p_hwfn, false,
546 "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
547 p_mb_params->cmd, p_mb_params->param);
548 ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
550 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
551 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
552 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
554 ecore_mcp_cmd_set_blocking(p_hwfn, true);
555 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
559 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
560 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
562 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
563 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
564 p_mb_params->mcp_resp, p_mb_params->mcp_param,
565 (cnt * delay) / 1000, (cnt * delay) % 1000);
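/* Worked example (added): with the default delay of 10 usec, cnt = 1234
 * iterations gives 12,340 usec, printed as "after 12.340 ms".
 */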
567 /* Clear the sequence number from the MFW response */
568 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
570 return ECORE_SUCCESS;
573 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
577 static enum _ecore_status_t
578 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
579 struct ecore_ptt *p_ptt,
580 struct ecore_mcp_mb_params *p_mb_params)
582 osal_size_t union_data_size = sizeof(union drv_union_data);
583 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
584 u32 delay = CHIP_MCP_RESP_ITER_US;
587 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
588 delay = EMUL_MCP_RESP_ITER_US;
589 /* There is a built-in delay of 100usec in each MFW response read */
590 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
594 /* MCP not initialized */
595 if (!ecore_mcp_is_init(p_hwfn)) {
596 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
600 if (p_mb_params->data_src_size > union_data_size ||
601 p_mb_params->data_dst_size > union_data_size) {
603 "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
604 p_mb_params->data_src_size, p_mb_params->data_dst_size,
609 if (p_hwfn->mcp_info->b_block_cmd) {
610 DP_NOTICE(p_hwfn, false,
611 "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
612 p_mb_params->cmd, p_mb_params->param);
613 return ECORE_ABORTED;
616 return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
620 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
621 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
622 u32 *o_mcp_resp, u32 *o_mcp_param)
624 struct ecore_mcp_mb_params mb_params;
625 enum _ecore_status_t rc;
628 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
629 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
631 loaded_port[p_hwfn->port_id]--;
632 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
635 return ECORE_SUCCESS;
639 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
641 mb_params.param = param;
642 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
643 if (rc != ECORE_SUCCESS)
646 *o_mcp_resp = mb_params.mcp_resp;
647 *o_mcp_param = mb_params.mcp_param;
649 return ECORE_SUCCESS;
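/* Typical caller pattern for ecore_mcp_cmd() (a sketch modeled on
 * ecore_mcp_load_done() and similar callers later in this file):
 *
 *   u32 resp = 0, param = 0;
 *   enum _ecore_status_t rc;
 *
 *   rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *                      &resp, &param);
 *   if (rc != ECORE_SUCCESS)
 *           return rc;
 *
 * On return, resp holds the FW_MSG_CODE_* response (the sequence bits are
 * already masked off by _ecore_mcp_cmd_and_union()), and param holds the
 * command-specific fw_mb_param value.
 */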
652 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
653 struct ecore_ptt *p_ptt,
658 u32 i_txn_size, u32 *i_buf)
660 struct ecore_mcp_mb_params mb_params;
661 enum _ecore_status_t rc;
663 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
665 mb_params.param = param;
666 mb_params.p_data_src = i_buf;
667 mb_params.data_src_size = (u8)i_txn_size;
668 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
669 if (rc != ECORE_SUCCESS)
672 *o_mcp_resp = mb_params.mcp_resp;
673 *o_mcp_param = mb_params.mcp_param;
675 return ECORE_SUCCESS;
678 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
679 struct ecore_ptt *p_ptt,
684 u32 *o_txn_size, u32 *o_buf)
686 struct ecore_mcp_mb_params mb_params;
687 u8 raw_data[MCP_DRV_NVM_BUF_LEN];
688 enum _ecore_status_t rc;
690 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
692 mb_params.param = param;
693 mb_params.p_data_dst = raw_data;
695 /* Use the maximal value since the actual one is part of the response */
696 mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
698 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
699 if (rc != ECORE_SUCCESS)
702 *o_mcp_resp = mb_params.mcp_resp;
703 *o_mcp_param = mb_params.mcp_param;
705 *o_txn_size = *o_mcp_param;
707 OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
709 return ECORE_SUCCESS;
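/* Note (added): the actual transaction size is only known from the response
 * (mcp_param), so callers should provide an o_buf that can hold up to
 * MCP_DRV_NVM_BUF_LEN bytes; the copy above is clamped to that size.
 */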
713 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
716 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
719 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
720 else if (!loaded_port[p_hwfn->port_id])
721 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
723 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
725 /* On CMT, always report the ENGINE load phase */
726 if (ECORE_IS_CMT(p_hwfn->p_dev))
727 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
729 *p_load_code = load_phase;
731 loaded_port[p_hwfn->port_id]++;
733 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
734 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
735 *p_load_code, loaded, p_hwfn->port_id,
736 loaded_port[p_hwfn->port_id]);
741 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
742 enum ecore_override_force_load override_force_load)
744 bool can_force_load = false;
746 switch (override_force_load) {
747 case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
748 can_force_load = true;
750 case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
751 can_force_load = false;
754 can_force_load = (drv_role == DRV_ROLE_OS &&
755 exist_drv_role == DRV_ROLE_PREBOOT) ||
756 (drv_role == DRV_ROLE_KDUMP &&
757 exist_drv_role == DRV_ROLE_OS);
761 return can_force_load;
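/* Policy summary (added): unless overridden, a force load is permitted only
 * when an OS driver is replacing a preboot driver, or a kdump driver is
 * replacing an OS driver.
 */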
764 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
765 struct ecore_ptt *p_ptt)
767 u32 resp = 0, param = 0;
768 enum _ecore_status_t rc;
770 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
772 if (rc != ECORE_SUCCESS)
773 DP_NOTICE(p_hwfn, false,
774 "Failed to send cancel load request, rc = %d\n", rc);
779 #define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
780 #define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
781 #define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
782 #define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
783 #define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
784 #define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
785 #define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
787 static u32 ecore_get_config_bitmap(void)
789 u32 config_bitmap = 0x0;
791 #ifdef CONFIG_ECORE_L2
792 config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
794 #ifdef CONFIG_ECORE_SRIOV
795 config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
797 #ifdef CONFIG_ECORE_ROCE
798 config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
800 #ifdef CONFIG_ECORE_IWARP
801 config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
803 #ifdef CONFIG_ECORE_FCOE
804 config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
806 #ifdef CONFIG_ECORE_ISCSI
807 config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
809 #ifdef CONFIG_ECORE_LL2
810 config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
813 return config_bitmap;
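/* Example (added): a build with CONFIG_ECORE_L2, CONFIG_ECORE_SRIOV and
 * CONFIG_ECORE_LL2 defined would return 0x1 | 0x2 | 0x40 = 0x43. This bitmap
 * is reported to the MFW as drv_ver_1 in the load request below.
 */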
816 struct ecore_load_req_in_params {
818 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
819 #define ECORE_LOAD_REQ_HSI_VER_1 1
826 bool avoid_eng_reset;
829 struct ecore_load_req_out_params {
839 static enum _ecore_status_t
840 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
841 struct ecore_load_req_in_params *p_in_params,
842 struct ecore_load_req_out_params *p_out_params)
844 struct ecore_mcp_mb_params mb_params;
845 struct load_req_stc load_req;
846 struct load_rsp_stc load_rsp;
848 enum _ecore_status_t rc;
850 OSAL_MEM_ZERO(&load_req, sizeof(load_req));
851 load_req.drv_ver_0 = p_in_params->drv_ver_0;
852 load_req.drv_ver_1 = p_in_params->drv_ver_1;
853 load_req.fw_ver = p_in_params->fw_ver;
854 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
855 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
856 p_in_params->timeout_val);
857 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
858 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
859 p_in_params->avoid_eng_reset);
861 hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
862 DRV_ID_MCP_HSI_VER_CURRENT :
863 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
865 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
866 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
867 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
868 mb_params.p_data_src = &load_req;
869 mb_params.data_src_size = sizeof(load_req);
870 mb_params.p_data_dst = &load_rsp;
871 mb_params.data_dst_size = sizeof(load_rsp);
873 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
874 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
876 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
877 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
878 GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
879 GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
881 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
882 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
883 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
884 load_req.drv_ver_0, load_req.drv_ver_1,
885 load_req.fw_ver, load_req.misc0,
886 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
887 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
888 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
889 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
891 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
892 if (rc != ECORE_SUCCESS) {
893 DP_NOTICE(p_hwfn, false,
894 "Failed to send load request, rc = %d\n", rc);
898 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
899 "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
900 p_out_params->load_code = mb_params.mcp_resp;
902 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
903 p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
904 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
905 "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
906 load_rsp.drv_ver_0, load_rsp.drv_ver_1,
907 load_rsp.fw_ver, load_rsp.misc0,
908 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
909 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
910 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
912 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
913 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
914 p_out_params->exist_fw_ver = load_rsp.fw_ver;
915 p_out_params->exist_drv_role =
916 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
917 p_out_params->mfw_hsi_ver =
918 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
919 p_out_params->drv_exists =
920 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
921 LOAD_RSP_FLAGS0_DRV_EXISTS;
924 return ECORE_SUCCESS;
927 static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
931 case ECORE_DRV_ROLE_OS:
932 *p_mfw_drv_role = DRV_ROLE_OS;
934 case ECORE_DRV_ROLE_KDUMP:
935 *p_mfw_drv_role = DRV_ROLE_KDUMP;
940 enum ecore_load_req_force {
941 ECORE_LOAD_REQ_FORCE_NONE,
942 ECORE_LOAD_REQ_FORCE_PF,
943 ECORE_LOAD_REQ_FORCE_ALL,
946 static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
950 case ECORE_LOAD_REQ_FORCE_NONE:
951 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
953 case ECORE_LOAD_REQ_FORCE_PF:
954 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
956 case ECORE_LOAD_REQ_FORCE_ALL:
957 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
962 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
963 struct ecore_ptt *p_ptt,
964 struct ecore_load_req_params *p_params)
966 struct ecore_load_req_out_params out_params;
967 struct ecore_load_req_in_params in_params;
968 u8 mfw_drv_role = 0, mfw_force_cmd;
969 enum _ecore_status_t rc;
972 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
973 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
974 return ECORE_SUCCESS;
978 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
979 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
980 in_params.drv_ver_0 = ECORE_VERSION;
981 in_params.drv_ver_1 = ecore_get_config_bitmap();
982 in_params.fw_ver = STORM_FW_VERSION;
983 ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
984 in_params.drv_role = mfw_drv_role;
985 in_params.timeout_val = p_params->timeout_val;
986 ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
987 in_params.force_cmd = mfw_force_cmd;
988 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
990 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
991 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
992 if (rc != ECORE_SUCCESS)
995 /* First handle cases where another load request should/might be sent:
996 * - MFW expects the old interface [HSI version = 1]
997 * - MFW responds that a force load request is required
999 if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
1001 "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
1003 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
1004 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1005 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1007 if (rc != ECORE_SUCCESS)
1009 } else if (out_params.load_code ==
1010 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
1011 if (ecore_mcp_can_force_load(in_params.drv_role,
1012 out_params.exist_drv_role,
1013 p_params->override_force_load)) {
1015 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
1016 in_params.drv_role, in_params.fw_ver,
1017 in_params.drv_ver_0, in_params.drv_ver_1,
1018 out_params.exist_drv_role,
1019 out_params.exist_fw_ver,
1020 out_params.exist_drv_ver_0,
1021 out_params.exist_drv_ver_1);
1023 ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
1026 in_params.force_cmd = mfw_force_cmd;
1027 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1028 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1030 if (rc != ECORE_SUCCESS)
1033 DP_NOTICE(p_hwfn, false,
1034 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1035 in_params.drv_role, in_params.fw_ver,
1036 in_params.drv_ver_0, in_params.drv_ver_1,
1037 out_params.exist_drv_role,
1038 out_params.exist_fw_ver,
1039 out_params.exist_drv_ver_0,
1040 out_params.exist_drv_ver_1);
1042 ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
1047 /* Now handle the other types of responses.
1048 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1049 * expected here after the additional revised load requests were sent.
1051 switch (out_params.load_code) {
1052 case FW_MSG_CODE_DRV_LOAD_ENGINE:
1053 case FW_MSG_CODE_DRV_LOAD_PORT:
1054 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1055 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
1056 out_params.drv_exists) {
1057 /* The role and fw/driver version match, but the PF is
1058 * already loaded and has not been unloaded gracefully.
1059 * This is unexpected since a quasi-FLR request was
1060 * previously sent as part of ecore_hw_prepare().
1062 DP_NOTICE(p_hwfn, false,
1063 "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
1068 DP_NOTICE(p_hwfn, false,
1069 "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1070 out_params.load_code);
1074 p_params->load_code = out_params.load_code;
1076 return ECORE_SUCCESS;
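/* A minimal caller sketch for ecore_mcp_load_req() (assembled from the
 * fields this function reads; the real caller lives elsewhere in the driver
 * and may set additional fields):
 *
 *   struct ecore_load_req_params params;
 *
 *   OSAL_MEM_ZERO(&params, sizeof(params));
 *   params.drv_role = ECORE_DRV_ROLE_OS;
 *   params.timeout_val = 0;  // 0 assumed to mean the MFW default lock timeout
 *   params.avoid_eng_reset = false;
 *   params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NEVER;
 *   rc = ecore_mcp_load_req(p_hwfn, p_ptt, &params);
 *
 * On success, params.load_code is one of FW_MSG_CODE_DRV_LOAD_ENGINE/PORT/
 * FUNCTION and drives the subsequent HW init flow.
 */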
1079 enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
1080 struct ecore_ptt *p_ptt)
1082 u32 resp = 0, param = 0;
1083 enum _ecore_status_t rc;
1085 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1087 if (rc != ECORE_SUCCESS) {
1088 DP_NOTICE(p_hwfn, false,
1089 "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1093 /* Check if there is a DID mismatch between nvm-cfg/efuse */
1094 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1095 DP_NOTICE(p_hwfn, false,
1096 "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1098 return ECORE_SUCCESS;
1101 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
1102 struct ecore_ptt *p_ptt)
1104 u32 wol_param, mcp_resp, mcp_param;
1107 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1109 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1110 &mcp_resp, &mcp_param);
1113 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1114 struct ecore_ptt *p_ptt)
1116 struct ecore_mcp_mb_params mb_params;
1117 struct mcp_mac wol_mac;
1119 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1120 mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1122 return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1125 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
1126 struct ecore_ptt *p_ptt)
1128 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1130 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1131 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1132 ECORE_PATH_ID(p_hwfn));
1133 u32 disabled_vfs[VF_MAX_STATIC / 32];
1136 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1137 "Reading Disabled VF information from [offset %08x],"
1138 " path_addr %08x\n",
1139 mfw_path_offsize, path_addr);
1141 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1142 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
1144 OFFSETOF(struct public_path,
1147 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1148 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1149 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1152 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1153 OSAL_VF_FLR_UPDATE(p_hwfn);
1156 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
1157 struct ecore_ptt *p_ptt,
1160 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1162 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1163 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1165 struct ecore_mcp_mb_params mb_params;
1166 enum _ecore_status_t rc;
1169 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1170 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1171 "Acking VFs [%08x,...,%08x] - %08x\n",
1172 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1174 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1175 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1176 mb_params.p_data_src = vfs_to_ack;
1177 mb_params.data_src_size = VF_MAX_STATIC / 8;
1178 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
1180 if (rc != ECORE_SUCCESS) {
1181 DP_NOTICE(p_hwfn, false,
1182 "Failed to pass ACK for VF flr to MFW\n");
1183 return ECORE_TIMEOUT;
1186 /* TMP - clear the ACK bits; should be done by MFW */
1187 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1188 ecore_wr(p_hwfn, p_ptt,
1190 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
1191 i * sizeof(u32), 0);
1196 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1197 struct ecore_ptt *p_ptt)
1199 u32 transceiver_state;
1201 transceiver_state = ecore_rd(p_hwfn, p_ptt,
1202 p_hwfn->mcp_info->port_addr +
1203 OFFSETOF(struct public_port,
1206 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1207 "Received transceiver state update [0x%08x] from mfw"
1209 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1210 OFFSETOF(struct public_port,
1211 transceiver_data)));
1213 transceiver_state = GET_MFW_FIELD(transceiver_state,
1214 ETH_TRANSCEIVER_STATE);
1216 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1217 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1219 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1221 OSAL_TRANSCEIVER_UPDATE(p_hwfn);
1224 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1225 struct ecore_ptt *p_ptt,
1226 struct ecore_mcp_link_state *p_link)
1228 u32 eee_status, val;
1230 p_link->eee_adv_caps = 0;
1231 p_link->eee_lp_adv_caps = 0;
1232 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1233 OFFSETOF(struct public_port, eee_status));
1234 p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1235 val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1236 if (val & EEE_1G_ADV)
1237 p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1238 if (val & EEE_10G_ADV)
1239 p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1240 val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1241 if (val & EEE_1G_ADV)
1242 p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1243 if (val & EEE_10G_ADV)
1244 p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1247 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1248 struct ecore_ptt *p_ptt,
1249 struct public_func *p_data,
1252 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1254 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1255 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1258 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1260 size = OSAL_MIN_T(u32, sizeof(*p_data),
1261 SECTION_SIZE(mfw_path_offsize));
1262 for (i = 0; i < size / sizeof(u32); i++)
1263 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1264 func_addr + (i << 2));
1269 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
1270 struct ecore_ptt *p_ptt,
1273 struct ecore_mcp_link_state *p_link;
1277 /* Prevent SW/attentions from doing this at the same time */
1278 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
1280 p_link = &p_hwfn->mcp_info->link_output;
1281 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1283 status = ecore_rd(p_hwfn, p_ptt,
1284 p_hwfn->mcp_info->port_addr +
1285 OFFSETOF(struct public_port, link_status));
1286 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
1287 "Received link update [0x%08x] from mfw"
1289 status, (u32)(p_hwfn->mcp_info->port_addr +
1290 OFFSETOF(struct public_port,
1293 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1294 "Resetting link indications\n");
1298 if (p_hwfn->b_drv_link_init) {
1299 /* Link indication with modern MFW arrives as per-PF
1302 if (p_hwfn->mcp_info->capabilities &
1303 FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1304 struct public_func shmem_info;
1306 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1308 p_link->link_up = !!(shmem_info.status &
1309 FUNC_STATUS_VIRTUAL_LINK_UP);
1311 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1314 p_link->link_up = false;
1317 p_link->full_duplex = true;
1318 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1319 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1320 p_link->speed = 100000;
1322 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1323 p_link->speed = 50000;
1325 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1326 p_link->speed = 40000;
1328 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1329 p_link->speed = 25000;
1331 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1332 p_link->speed = 20000;
1334 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1335 p_link->speed = 10000;
1337 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1338 p_link->full_duplex = false;
1340 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1341 p_link->speed = 1000;
1347 /* We never store total line speed in p_link->speed, since it is
1348 * later changed according to bandwidth allocation.
1350 if (p_link->link_up && p_link->speed)
1351 p_link->line_speed = p_link->speed;
1353 p_link->line_speed = 0;
1355 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1356 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1358 /* Max bandwidth configuration */
1359 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
1362 /* Min bandwidth configuration */
1363 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
1365 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
1366 p_link->min_pf_rate);
1368 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1369 p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1370 p_link->parallel_detection = !!(status &
1371 LINK_STATUS_PARALLEL_DETECTION_USED);
1372 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1374 p_link->partner_adv_speed |=
1375 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1376 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1377 p_link->partner_adv_speed |=
1378 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1379 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1380 p_link->partner_adv_speed |=
1381 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1382 ECORE_LINK_PARTNER_SPEED_10G : 0;
1383 p_link->partner_adv_speed |=
1384 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1385 ECORE_LINK_PARTNER_SPEED_20G : 0;
1386 p_link->partner_adv_speed |=
1387 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1388 ECORE_LINK_PARTNER_SPEED_25G : 0;
1389 p_link->partner_adv_speed |=
1390 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1391 ECORE_LINK_PARTNER_SPEED_40G : 0;
1392 p_link->partner_adv_speed |=
1393 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1394 ECORE_LINK_PARTNER_SPEED_50G : 0;
1395 p_link->partner_adv_speed |=
1396 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1397 ECORE_LINK_PARTNER_SPEED_100G : 0;
1399 p_link->partner_tx_flow_ctrl_en =
1400 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1401 p_link->partner_rx_flow_ctrl_en =
1402 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1404 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1405 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1406 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1408 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1409 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1411 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1412 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1415 p_link->partner_adv_pause = 0;
1418 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1420 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1421 ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1423 OSAL_LINK_UPDATE(p_hwfn);
1425 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
1428 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1429 struct ecore_ptt *p_ptt, bool b_up)
1431 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1432 struct ecore_mcp_mb_params mb_params;
1433 struct eth_phy_cfg phy_cfg;
1434 enum _ecore_status_t rc = ECORE_SUCCESS;
1438 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1439 return ECORE_SUCCESS;
1442 /* Set the shmem configuration according to params */
1443 OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1444 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1445 if (!params->speed.autoneg)
1446 phy_cfg.speed = params->speed.forced_speed;
1447 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1448 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1449 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1450 phy_cfg.adv_speed = params->speed.advertised_speeds;
1451 phy_cfg.loopback_mode = params->loopback_mode;
1453 /* There are MFWs that share this capability regardless of whether
1454 * this is feasible or not. And given that at the very least adv_caps
1455 * would be set internally by ecore, we want to make sure LFA would still work.
1458 if ((p_hwfn->mcp_info->capabilities &
1459 FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
1460 params->eee.enable) {
1461 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1462 if (params->eee.tx_lpi_enable)
1463 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1464 if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
1465 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1466 if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
1467 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1468 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1469 EEE_TX_TIMER_USEC_OFFSET) &
1470 EEE_TX_TIMER_USEC_MASK;
1473 p_hwfn->b_drv_link_init = b_up;
1476 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1477 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
1478 phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1479 phy_cfg.loopback_mode);
1481 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1483 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1484 mb_params.cmd = cmd;
1485 mb_params.p_data_src = &phy_cfg;
1486 mb_params.data_src_size = sizeof(phy_cfg);
1487 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1489 /* if mcp fails to respond we must abort */
1490 if (rc != ECORE_SUCCESS) {
1491 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1495 /* Mimic link-change attention, done for several reasons:
1496 * - On reset, there's no guarantee MFW would trigger an attention.
1498 * - On initialization, older MFWs might not indicate link change
1499 * during LFA, so we'll never get an UP indication.
1501 ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1503 return ECORE_SUCCESS;
1506 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1507 struct ecore_ptt *p_ptt)
1509 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1511 /* TODO - Add support for VFs */
1512 if (IS_VF(p_hwfn->p_dev))
1515 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1517 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1518 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1520 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1522 OFFSETOF(struct public_path, process_kill)) &
1523 PROCESS_KILL_COUNTER_MASK;
1525 return proc_kill_cnt;
1528 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1529 struct ecore_ptt *p_ptt)
1531 struct ecore_dev *p_dev = p_hwfn->p_dev;
1534 /* Prevent possible attentions/interrupts during the recovery handling
1535 * and till its load phase, during which they will be re-enabled.
1537 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1539 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1541 /* The following operations should be done once, and thus in CMT mode
1542 * are carried out by only the first HW function.
1544 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1547 if (p_dev->recov_in_prog) {
1548 DP_NOTICE(p_hwfn, false,
1549 "Ignoring the indication since a recovery"
1550 " process is already in progress\n");
1554 p_dev->recov_in_prog = true;
1556 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1557 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1559 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1562 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1563 struct ecore_ptt *p_ptt,
1564 enum MFW_DRV_MSG_TYPE type)
1566 enum ecore_mcp_protocol_type stats_type;
1567 union ecore_mcp_protocol_stats stats;
1568 struct ecore_mcp_mb_params mb_params;
1570 enum _ecore_status_t rc;
1573 case MFW_DRV_MSG_GET_LAN_STATS:
1574 stats_type = ECORE_MCP_LAN_STATS;
1575 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1578 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1579 "Invalid protocol type %d\n", type);
1583 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1585 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1586 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1587 mb_params.param = hsi_param;
1588 mb_params.p_data_src = &stats;
1589 mb_params.data_src_size = sizeof(stats);
1590 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1591 if (rc != ECORE_SUCCESS)
1592 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1595 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1596 struct public_func *p_shmem_info)
1598 struct ecore_mcp_function_info *p_info;
1600 p_info = &p_hwfn->mcp_info->func_info;
1602 /* TODO - bandwidth min/max should have valid values of 1-100,
1603 * as well as some indication that the feature is disabled.
1604 * Until MFW/qlediag enforce those limitations, assume there is always
1605 * a limit, and correct the value to min `1' and max `100' if it isn't in
1608 p_info->bandwidth_min = (p_shmem_info->config &
1609 FUNC_MF_CFG_MIN_BW_MASK) >>
1610 FUNC_MF_CFG_MIN_BW_OFFSET;
1611 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1613 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1614 p_info->bandwidth_min);
1615 p_info->bandwidth_min = 1;
1618 p_info->bandwidth_max = (p_shmem_info->config &
1619 FUNC_MF_CFG_MAX_BW_MASK) >>
1620 FUNC_MF_CFG_MAX_BW_OFFSET;
1621 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1623 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1624 p_info->bandwidth_max);
1625 p_info->bandwidth_max = 100;
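/* Example (added): a raw shmem value of 0 is corrected to 1, and a value
 * such as 125 is corrected to 100, so the resulting min/max bandwidth always
 * ends up within [1, 100].
 */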
1630 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1632 struct ecore_mcp_function_info *p_info;
1633 struct public_func shmem_info;
1634 u32 resp = 0, param = 0;
1636 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1638 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1640 p_info = &p_hwfn->mcp_info->func_info;
1642 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1644 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1646 /* Acknowledge the MFW */
1647 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1651 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
1653 /* A single notification should be sent to upper driver in CMT mode */
1654 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1657 DP_NOTICE(p_hwfn, false,
1658 "Fan failure was detected on the network interface card"
1659 " and it's going to be shut down.\n");
1661 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1664 struct ecore_mdump_cmd_params {
1673 static enum _ecore_status_t
1674 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1675 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1677 struct ecore_mcp_mb_params mb_params;
1678 enum _ecore_status_t rc;
1680 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1681 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1682 mb_params.param = p_mdump_cmd_params->cmd;
1683 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1684 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1685 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1686 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1687 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1688 if (rc != ECORE_SUCCESS)
1691 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1693 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1695 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1696 p_mdump_cmd_params->cmd);
1698 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1700 "The mdump command is not supported by the MFW\n");
1707 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1708 struct ecore_ptt *p_ptt)
1710 struct ecore_mdump_cmd_params mdump_cmd_params;
1712 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1713 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1715 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1718 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1719 struct ecore_ptt *p_ptt,
1722 struct ecore_mdump_cmd_params mdump_cmd_params;
1724 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1725 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1726 mdump_cmd_params.p_data_src = &epoch;
1727 mdump_cmd_params.data_src_size = sizeof(epoch);
1729 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1732 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1733 struct ecore_ptt *p_ptt)
1735 struct ecore_mdump_cmd_params mdump_cmd_params;
1737 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1738 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1740 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1743 static enum _ecore_status_t
1744 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1745 struct mdump_config_stc *p_mdump_config)
1747 struct ecore_mdump_cmd_params mdump_cmd_params;
1748 enum _ecore_status_t rc;
1750 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1751 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1752 mdump_cmd_params.p_data_dst = p_mdump_config;
1753 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1755 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1756 if (rc != ECORE_SUCCESS)
1759 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1761 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1762 mdump_cmd_params.mcp_resp);
1763 rc = ECORE_UNKNOWN_ERROR;
1769 enum _ecore_status_t
1770 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1771 struct ecore_mdump_info *p_mdump_info)
1773 u32 addr, global_offsize, global_addr;
1774 struct mdump_config_stc mdump_config;
1775 enum _ecore_status_t rc;
1777 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1779 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1781 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1782 global_addr = SECTION_ADDR(global_offsize, 0);
1783 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1785 OFFSETOF(struct public_global,
1788 if (p_mdump_info->reason) {
1789 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1790 if (rc != ECORE_SUCCESS)
1793 p_mdump_info->version = mdump_config.version;
1794 p_mdump_info->config = mdump_config.config;
1795 p_mdump_info->epoch = mdump_config.epoc;
1796 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1797 p_mdump_info->valid_logs = mdump_config.valid_logs;
1799 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1800 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1801 p_mdump_info->reason, p_mdump_info->version,
1802 p_mdump_info->config, p_mdump_info->epoch,
1803 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1805 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1806 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1809 return ECORE_SUCCESS;
1812 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1813 struct ecore_ptt *p_ptt)
1815 struct ecore_mdump_cmd_params mdump_cmd_params;
1817 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1818 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1820 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1823 enum _ecore_status_t
1824 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1825 struct ecore_mdump_retain_data *p_mdump_retain)
1827 struct ecore_mdump_cmd_params mdump_cmd_params;
1828 struct mdump_retain_data_stc mfw_mdump_retain;
1829 enum _ecore_status_t rc;
1831 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1832 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1833 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1834 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1836 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1837 if (rc != ECORE_SUCCESS)
1840 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1842 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1843 mdump_cmd_params.mcp_resp);
1844 return ECORE_UNKNOWN_ERROR;
1847 p_mdump_retain->valid = mfw_mdump_retain.valid;
1848 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1849 p_mdump_retain->pf = mfw_mdump_retain.pf;
1850 p_mdump_retain->status = mfw_mdump_retain.status;
1852 return ECORE_SUCCESS;
1855 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1856 struct ecore_ptt *p_ptt)
1858 struct ecore_mdump_cmd_params mdump_cmd_params;
1860 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1861 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1863 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1866 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1867 struct ecore_ptt *p_ptt)
1869 struct ecore_mdump_retain_data mdump_retain;
1870 enum _ecore_status_t rc;
1872 /* In CMT mode - no need for more than a single acknowledgment to the
1873 * MFW, and no more than a single notification to the upper driver.
1875 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1878 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1879 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1880 DP_NOTICE(p_hwfn, false,
1881 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1882 mdump_retain.epoch, mdump_retain.pf,
1883 mdump_retain.status);
1885 DP_NOTICE(p_hwfn, false,
1886 "The MFW notified that a critical error occurred in the device\n");
1889 if (p_hwfn->p_dev->allow_mdump) {
1890 DP_NOTICE(p_hwfn, false,
1891 "Not acknowledging the notification to allow the MFW crash dump\n");
1895 DP_NOTICE(p_hwfn, false,
1896 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1897 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1898 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1902 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1904 struct public_func shmem_info;
1907 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
1910 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1911 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1912 OFFSETOF(struct public_port, oem_cfg_port));
1913 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
1914 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1915 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
1918 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
1919 if (val == OEM_CFG_SCHED_TYPE_ETS)
1920 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
1921 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
1922 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
1924 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
1927 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1929 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
1930 p_hwfn->ufp_info.tc = (u8)val;
1931 val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
1932 OEM_CFG_FUNC_HOST_PRI_CTRL);
1933 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
1934 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
1935 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
1936 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
1938 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
1941 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
1942 "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
1943 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1944 p_hwfn->ufp_info.pri_type);
1947 static enum _ecore_status_t
1948 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1950 ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
1952 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
1953 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1954 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
1956 ecore_qm_reconf(p_hwfn, p_ptt);
1958 /* Merge UFP TC with the dcbx TC data */
1959 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1960 ECORE_DCBX_OPERATIONAL_MIB);
1963 /* update storm FW with negotiation results */
1964 ecore_sp_pf_update_ufp(p_hwfn);
1966 return ECORE_SUCCESS;
1969 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1970 struct ecore_ptt *p_ptt)
1972 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1973 enum _ecore_status_t rc = ECORE_SUCCESS;
1977 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1979 /* Read Messages from MFW */
1980 ecore_mcp_read_mb(p_hwfn, p_ptt);
1982 /* Compare current messages to old ones */
1983 for (i = 0; i < info->mfw_mb_length; i++) {
1984 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1989 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1990 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1991 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1994 case MFW_DRV_MSG_LINK_CHANGE:
1995 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1997 case MFW_DRV_MSG_VF_DISABLED:
1998 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
2000 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2001 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2002 ECORE_DCBX_REMOTE_LLDP_MIB);
2004 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2005 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2006 ECORE_DCBX_REMOTE_MIB);
2008 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2009 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2010 ECORE_DCBX_OPERATIONAL_MIB);
2011 /* clear the user-config cache */
2012 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
2013 sizeof(struct ecore_dcbx_set));
2015 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
2016 ecore_lldp_mib_update_event(p_hwfn, p_ptt);
2018 case MFW_DRV_MSG_OEM_CFG_UPDATE:
2019 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
2021 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2022 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2024 case MFW_DRV_MSG_ERROR_RECOVERY:
2025 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2027 case MFW_DRV_MSG_GET_LAN_STATS:
2028 case MFW_DRV_MSG_GET_FCOE_STATS:
2029 case MFW_DRV_MSG_GET_ISCSI_STATS:
2030 case MFW_DRV_MSG_GET_RDMA_STATS:
2031 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2033 case MFW_DRV_MSG_BW_UPDATE:
2034 ecore_mcp_update_bw(p_hwfn, p_ptt);
2036 case MFW_DRV_MSG_FAILURE_DETECTED:
2037 ecore_mcp_handle_fan_failure(p_hwfn);
2039 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2040 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2043 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2048 /* ACK everything */
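/* The acknowledgment dwords sit in shmem right after the current-message
 * array: skip the leading length dword plus the message dwords to reach them.
 */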
2049 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2050 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2052 /* The MFW expects the answer in BE, so force the write in that format */
2053 ecore_wr(p_hwfn, p_ptt,
2054 info->mfw_mb_addr + sizeof(u32) +
2055 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2056 sizeof(u32) + i * sizeof(u32), val);
2060 DP_NOTICE(p_hwfn, false,
2061 "Received an MFW message indication but no"
2066 /* Copy the new mfw messages into the shadow */
2067 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2072 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2073 struct ecore_ptt *p_ptt,
2075 u32 *p_running_bundle_id)
2080 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2081 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2082 return ECORE_SUCCESS;
2086 if (IS_VF(p_hwfn->p_dev)) {
2087 if (p_hwfn->vf_iov_info) {
2088 struct pfvf_acquire_resp_tlv *p_resp;
2090 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2091 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2092 return ECORE_SUCCESS;
2094 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2095 "VF requested MFW version prior to ACQUIRE\n");
2100 global_offsize = ecore_rd(p_hwfn, p_ptt,
2101 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
2105 ecore_rd(p_hwfn, p_ptt,
2106 SECTION_ADDR(global_offsize,
2107 0) + OFFSETOF(struct public_global, mfw_ver));
2109 if (p_running_bundle_id != OSAL_NULL) {
2110 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2111 SECTION_ADDR(global_offsize,
2113 OFFSETOF(struct public_global,
2114 running_bundle_id));
2117 return ECORE_SUCCESS;
2120 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2121 struct ecore_ptt *p_ptt,
2125 /* TODO - Add support for VFs */
2126 if (IS_VF(p_hwfn->p_dev))
2129 if (!ecore_mcp_is_init(p_hwfn)) {
2130 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
2135 *p_media_type = MEDIA_UNSPECIFIED;
2138 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2139 p_hwfn->mcp_info->port_addr +
2140 OFFSETOF(struct public_port,
2144 return ECORE_SUCCESS;
2148 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2150 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2151 enum ecore_pci_personality *p_proto)
2153 *p_proto = ECORE_PCI_ETH;
2155 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2156 "According to Legacy capabilities, L2 personality is %08x\n",
2161 static enum _ecore_status_t
2162 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2163 struct ecore_ptt *p_ptt,
2164 enum ecore_pci_personality *p_proto)
2166 u32 resp = 0, param = 0;
2167 enum _ecore_status_t rc;
2169 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2170 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2171 (u32)*p_proto, resp, param);
2172 return ECORE_SUCCESS;
2175 static enum _ecore_status_t
2176 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2177 struct public_func *p_info,
2178 struct ecore_ptt *p_ptt,
2179 enum ecore_pci_personality *p_proto)
2181 enum _ecore_status_t rc = ECORE_SUCCESS;
2183 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2184 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2185 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2187 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2196 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2197 struct ecore_ptt *p_ptt)
2199 struct ecore_mcp_function_info *info;
2200 struct public_func shmem_info;
2202 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2203 info = &p_hwfn->mcp_info->func_info;
2205 info->pause_on_host = (shmem_info.config &
2206 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2208 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2210 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2211 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2215 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2217 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2218 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2219 info->mac[1] = (u8)(shmem_info.mac_upper);
2220 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2221 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2222 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2223 info->mac[5] = (u8)(shmem_info.mac_lower);
2225 /* TODO - are there protocols for which there's no MAC? */
2226 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2229 /* TODO - are these calculations correct on a BE machine? */
2230 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2231 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2232 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2233 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2235 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2237 info->mtu = (u16)shmem_info.mtu_size;
2242 info->mtu = (u16)shmem_info.mtu_size;
2244 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2245 "Read configuration from shmem: pause_on_host %02x"
2246 " protocol %02x BW [%02x - %02x]"
2247 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
2248 " node %lx ovlan %04x\n",
2249 info->pause_on_host, info->protocol,
2250 info->bandwidth_min, info->bandwidth_max,
2251 info->mac[0], info->mac[1], info->mac[2],
2252 info->mac[3], info->mac[4], info->mac[5],
2253 (unsigned long)info->wwn_port,
2254 (unsigned long)info->wwn_node, info->ovlan);
2256 return ECORE_SUCCESS;
2259 struct ecore_mcp_link_params
2260 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2262 if (!p_hwfn || !p_hwfn->mcp_info)
2264 return &p_hwfn->mcp_info->link_input;
2267 struct ecore_mcp_link_state
2268 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2270 if (!p_hwfn || !p_hwfn->mcp_info)
2274 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2275 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2276 p_hwfn->mcp_info->link_output.link_up = true;
2280 return &p_hwfn->mcp_info->link_output;
2283 struct ecore_mcp_link_capabilities
2284 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2286 if (!p_hwfn || !p_hwfn->mcp_info)
2288 return &p_hwfn->mcp_info->link_capabilities;
2291 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2292 struct ecore_ptt *p_ptt)
2294 u32 resp = 0, param = 0;
2295 enum _ecore_status_t rc;
2297 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2298 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2300 /* Wait for the drain to complete before returning */
2306 const struct ecore_mcp_function_info
2307 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2309 if (!p_hwfn || !p_hwfn->mcp_info)
2311 return &p_hwfn->mcp_info->func_info;
2314 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2315 struct ecore_ptt *p_ptt, u32 personalities)
2317 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2318 struct public_func shmem_info;
2319 int i, count = 0, num_pfs;
2321 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2323 for (i = 0; i < num_pfs; i++) {
2324 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2325 MCP_PF_ID_BY_REL(p_hwfn, i));
2326 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2329 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2334 if ((1 << ((u32)protocol)) & personalities)
2341 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2342 struct ecore_ptt *p_ptt,
2348 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2349 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2354 if (IS_VF(p_hwfn->p_dev))
2357 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2358 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2359 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
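/* The CFG4 field encodes the flash size as a power-of-two number of Mbits;
 * adding MCP_BYTES_PER_MBIT_OFFSET (17) to the exponent converts it to bytes.
 */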
2360 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2362 *p_flash_size = flash_size;
2364 return ECORE_SUCCESS;
2367 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2368 struct ecore_ptt *p_ptt)
2370 struct ecore_dev *p_dev = p_hwfn->p_dev;
2372 if (p_dev->recov_in_prog) {
2373 DP_NOTICE(p_hwfn, false,
2374 "Avoid triggering a recovery since such a process"
2375 " is already in progress\n");
2379 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2380 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2382 return ECORE_SUCCESS;
2385 static enum _ecore_status_t
2386 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2387 struct ecore_ptt *p_ptt,
2390 u32 resp = 0, param = 0, rc_param = 0;
2391 enum _ecore_status_t rc;
2393 /* Only the leader hwfn can configure MSI-X, and it must take CMT into account */
2395 if (!IS_LEAD_HWFN(p_hwfn))
2396 return ECORE_SUCCESS;
2397 num *= p_hwfn->p_dev->num_hwfns;
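/* Scale the SB count by the number of engines so that, on a CMT device,
 * the single MFW request covers the VF's resources on both hwfns.
 */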
2399 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2400 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2401 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2402 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2404 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2407 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2408 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2412 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2413 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2420 static enum _ecore_status_t
2421 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2422 struct ecore_ptt *p_ptt,
2425 u32 resp = 0, param = num, rc_param = 0;
2426 enum _ecore_status_t rc;
2428 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2429 param, &resp, &rc_param);
2431 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2432 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2435 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2436 "Requested 0x%02x MSI-x interrupts for VFs\n",
2443 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2444 struct ecore_ptt *p_ptt,
2447 if (ECORE_IS_BB(p_hwfn->p_dev))
2448 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2450 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2453 enum _ecore_status_t
2454 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2455 struct ecore_mcp_drv_version *p_ver)
2457 struct ecore_mcp_mb_params mb_params;
2458 struct drv_version_stc drv_version;
2462 enum _ecore_status_t rc;
2465 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2466 return ECORE_SUCCESS;
2469 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2470 drv_version.version = p_ver->version;
2471 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
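/* The name field of drv_version_stc spans the remainder of
 * MCP_DRV_VER_STR_SIZE after the 4-byte version word; copy it one dword
 * at a time so it can be byte-swapped for the MFW.
 */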
2472 for (i = 0; i < num_words; i++) {
2473 /* The driver name is expected to be in a big-endian format */
2474 p_name = &p_ver->name[i * sizeof(u32)];
2475 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2476 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2479 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2480 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2481 mb_params.p_data_src = &drv_version;
2482 mb_params.data_src_size = sizeof(drv_version);
2483 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2484 if (rc != ECORE_SUCCESS)
2485 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2490 /* A maximum waiting time of 100 msec for the MCP to halt */
2491 #define ECORE_MCP_HALT_SLEEP_MS 10
2492 #define ECORE_MCP_HALT_MAX_RETRIES 10
2494 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2495 struct ecore_ptt *p_ptt)
2497 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2498 enum _ecore_status_t rc;
2500 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2502 if (rc != ECORE_SUCCESS) {
2503 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2508 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2509 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2510 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2512 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2514 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2515 DP_NOTICE(p_hwfn, false,
2516 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2517 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2521 ecore_mcp_cmd_set_blocking(p_hwfn, true);
2523 return ECORE_SUCCESS;
2526 #define ECORE_MCP_RESUME_SLEEP_MS 10
2528 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2529 struct ecore_ptt *p_ptt)
2531 u32 cpu_mode, cpu_state;
2533 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2535 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2536 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2537 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2539 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2540 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2542 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2543 DP_NOTICE(p_hwfn, false,
2544 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2545 cpu_mode, cpu_state);
2549 ecore_mcp_cmd_set_blocking(p_hwfn, false);
2551 return ECORE_SUCCESS;
2554 enum _ecore_status_t
2555 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2556 struct ecore_ptt *p_ptt,
2557 enum ecore_ov_client client)
2559 u32 resp = 0, param = 0;
2561 enum _ecore_status_t rc;
2564 case ECORE_OV_CLIENT_DRV:
2565 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2567 case ECORE_OV_CLIENT_USER:
2568 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2570 case ECORE_OV_CLIENT_VENDOR_SPEC:
2571 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2574 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2578 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2579 drv_mb_param, &resp, &param);
2580 if (rc != ECORE_SUCCESS)
2581 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2586 enum _ecore_status_t
2587 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2588 struct ecore_ptt *p_ptt,
2589 enum ecore_ov_driver_state drv_state)
2591 u32 resp = 0, param = 0;
2593 enum _ecore_status_t rc;
2595 switch (drv_state) {
2596 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2597 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2599 case ECORE_OV_DRIVER_STATE_DISABLED:
2600 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2602 case ECORE_OV_DRIVER_STATE_ACTIVE:
2603 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2606 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2610 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2611 drv_mb_param, &resp, &param);
2612 if (rc != ECORE_SUCCESS)
2613 DP_ERR(p_hwfn, "Failed to send driver state\n");
2618 enum _ecore_status_t
2619 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2620 struct ecore_fc_npiv_tbl *p_table)
2625 enum _ecore_status_t
2626 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
2627 struct ecore_ptt *p_ptt, u16 mtu)
2632 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2633 struct ecore_ptt *p_ptt,
2634 enum ecore_led_mode mode)
2636 u32 resp = 0, param = 0, drv_mb_param;
2637 enum _ecore_status_t rc;
2640 case ECORE_LED_MODE_ON:
2641 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2643 case ECORE_LED_MODE_OFF:
2644 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2646 case ECORE_LED_MODE_RESTORE:
2647 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2650 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2654 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2655 drv_mb_param, &resp, &param);
2656 if (rc != ECORE_SUCCESS)
2657 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2662 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2663 struct ecore_ptt *p_ptt,
2666 u32 resp = 0, param = 0;
2667 enum _ecore_status_t rc;
2669 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2670 mask_parities, &resp, &param);
2672 if (rc != ECORE_SUCCESS) {
2674 "MCP response failure for mask parities, aborting\n");
2675 } else if (resp != FW_MSG_CODE_OK) {
2677 "MCP did not ack mask parity request. Old MFW?\n");
2684 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2687 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2688 u32 bytes_left, offset, bytes_to_copy, buf_size;
2689 u32 nvm_offset, resp, param;
2690 struct ecore_ptt *p_ptt;
2691 enum _ecore_status_t rc = ECORE_SUCCESS;
2693 p_ptt = ecore_ptt_acquire(p_hwfn);
2699 while (bytes_left > 0) {
2700 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2701 MCP_DRV_NVM_BUF_LEN);
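/* Pack the NVM address into the low bits of the mailbox param and the
 * chunk length into the DRV_MB_PARAM_NVM_LEN field.
 */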
2702 nvm_offset = (addr + offset) | (bytes_to_copy <<
2703 DRV_MB_PARAM_NVM_LEN_OFFSET);
2704 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2705 DRV_MSG_CODE_NVM_READ_NVRAM,
2706 nvm_offset, &resp, &param, &buf_size,
2707 (u32 *)(p_buf + offset));
2708 if (rc != ECORE_SUCCESS) {
2709 DP_NOTICE(p_dev, false,
2710 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
2712 resp = FW_MSG_CODE_ERROR;
2716 if (resp != FW_MSG_CODE_NVM_OK) {
2717 DP_NOTICE(p_dev, false,
2718 "nvm read failed, resp = 0x%08x\n", resp);
2719 rc = ECORE_UNKNOWN_ERROR;
2723 /* This can be a lengthy process, and the scheduler might not be
2724 * preemptible. Sleep a bit to prevent CPU hogging.
2726 if (bytes_left % 0x1000 <
2727 (bytes_left - buf_size) % 0x1000)
2731 bytes_left -= buf_size;
2734 p_dev->mcp_nvm_resp = resp;
2735 ecore_ptt_release(p_hwfn, p_ptt);
2740 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2741 u32 addr, u8 *p_buf, u32 len)
2743 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2744 struct ecore_ptt *p_ptt;
2746 enum _ecore_status_t rc;
2748 p_ptt = ecore_ptt_acquire(p_hwfn);
2752 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2753 (cmd == ECORE_PHY_CORE_READ) ?
2754 DRV_MSG_CODE_PHY_CORE_READ :
2755 DRV_MSG_CODE_PHY_RAW_READ,
2756 addr, &resp, &param, &len, (u32 *)p_buf);
2757 if (rc != ECORE_SUCCESS)
2758 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2760 p_dev->mcp_nvm_resp = resp;
2761 ecore_ptt_release(p_hwfn, p_ptt);
2766 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2768 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2769 struct ecore_ptt *p_ptt;
2771 p_ptt = ecore_ptt_acquire(p_hwfn);
2775 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2776 ecore_ptt_release(p_hwfn, p_ptt);
2778 return ECORE_SUCCESS;
2781 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2783 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2784 struct ecore_ptt *p_ptt;
2786 enum _ecore_status_t rc;
2788 p_ptt = ecore_ptt_acquire(p_hwfn);
2791 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
2793 p_dev->mcp_nvm_resp = resp;
2794 ecore_ptt_release(p_hwfn, p_ptt);
2799 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2802 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2803 struct ecore_ptt *p_ptt;
2805 enum _ecore_status_t rc;
2807 p_ptt = ecore_ptt_acquire(p_hwfn);
2810 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
2812 p_dev->mcp_nvm_resp = resp;
2813 ecore_ptt_release(p_hwfn, p_ptt);
2818 /* rc is initialized to ECORE_INVAL because the while loop below
2819 * is never entered when len is 0
2821 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2822 u32 addr, u8 *p_buf, u32 len)
2824 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
2825 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2826 enum _ecore_status_t rc = ECORE_INVAL;
2827 struct ecore_ptt *p_ptt;
2829 p_ptt = ecore_ptt_acquire(p_hwfn);
2834 case ECORE_PUT_FILE_DATA:
2835 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2837 case ECORE_NVM_WRITE_NVRAM:
2838 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2840 case ECORE_EXT_PHY_FW_UPGRADE:
2841 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
2844 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
2851 while (buf_idx < len) {
2852 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2853 MCP_DRV_NVM_BUF_LEN);
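/* The param word carries the chunk length in the DRV_MB_PARAM_NVM_LEN
 * field, mirroring the read path.
 */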
2854 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
2857 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
2858 &resp, &param, buf_size,
2859 (u32 *)&p_buf[buf_idx]);
2860 if (rc != ECORE_SUCCESS) {
2861 DP_NOTICE(p_dev, false,
2862 "ecore_mcp_nvm_write() failed, rc = %d\n",
2864 resp = FW_MSG_CODE_ERROR;
2868 if (resp != FW_MSG_CODE_OK &&
2869 resp != FW_MSG_CODE_NVM_OK &&
2870 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
2871 DP_NOTICE(p_dev, false,
2872 "nvm write failed, resp = 0x%08x\n", resp);
2873 rc = ECORE_UNKNOWN_ERROR;
2877 /* This can be a lengthy process, and the scheduler might not be
2878 * preemptible. Sleep a bit to prevent CPU hogging.
2880 if (buf_idx % 0x1000 >
2881 (buf_idx + buf_size) % 0x1000)
2884 buf_idx += buf_size;
2887 p_dev->mcp_nvm_resp = resp;
2889 ecore_ptt_release(p_hwfn, p_ptt);
2894 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2895 u32 addr, u8 *p_buf, u32 len)
2897 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2898 struct ecore_ptt *p_ptt;
2899 u32 resp, param, nvm_cmd;
2900 enum _ecore_status_t rc;
2902 p_ptt = ecore_ptt_acquire(p_hwfn);
2906 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
2907 DRV_MSG_CODE_PHY_RAW_WRITE;
2908 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
2909 &resp, &param, len, (u32 *)p_buf);
2910 if (rc != ECORE_SUCCESS)
2911 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2912 p_dev->mcp_nvm_resp = resp;
2913 ecore_ptt_release(p_hwfn, p_ptt);
2918 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2921 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2922 struct ecore_ptt *p_ptt;
2924 enum _ecore_status_t rc;
2926 p_ptt = ecore_ptt_acquire(p_hwfn);
2930 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
2932 p_dev->mcp_nvm_resp = resp;
2933 ecore_ptt_release(p_hwfn, p_ptt);
2938 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2939 struct ecore_ptt *p_ptt,
2940 u32 port, u32 addr, u32 offset,
2943 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
2945 enum _ecore_status_t rc;
2947 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2948 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
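/* The port and I2C address stay fixed for the whole transfer; the per-chunk
 * offset and size are OR-ed in on each iteration below.
 */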
2952 while (bytes_left > 0) {
2953 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2954 MAX_I2C_TRANSACTION_SIZE);
2955 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2956 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2957 nvm_offset |= ((addr + offset) <<
2958 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
2959 nvm_offset |= (bytes_to_copy <<
2960 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
2961 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2962 DRV_MSG_CODE_TRANSCEIVER_READ,
2963 nvm_offset, &resp, &param, &buf_size,
2964 (u32 *)(p_buf + offset));
2965 if (rc != ECORE_SUCCESS) {
2966 DP_NOTICE(p_hwfn, false,
2967 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
2972 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
2974 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2975 return ECORE_UNKNOWN_ERROR;
2978 bytes_left -= buf_size;
2981 return ECORE_SUCCESS;
2984 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2985 struct ecore_ptt *p_ptt,
2986 u32 port, u32 addr, u32 offset,
2989 u32 buf_idx, buf_size, nvm_offset, resp, param;
2990 enum _ecore_status_t rc;
2992 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2993 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
2995 while (buf_idx < len) {
2996 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2997 MAX_I2C_TRANSACTION_SIZE);
2998 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2999 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3000 nvm_offset |= ((offset + buf_idx) <<
3001 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3002 nvm_offset |= (buf_size <<
3003 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3004 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3005 DRV_MSG_CODE_TRANSCEIVER_WRITE,
3006 nvm_offset, &resp, &param, buf_size,
3007 (u32 *)&p_buf[buf_idx]);
3008 if (rc != ECORE_SUCCESS) {
3009 DP_NOTICE(p_hwfn, false,
3010 "Failed to send a transceiver write command to the MFW. rc = %d.\n",
3015 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3017 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3018 return ECORE_UNKNOWN_ERROR;
3020 buf_idx += buf_size;
3023 return ECORE_SUCCESS;
3026 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3027 struct ecore_ptt *p_ptt,
3028 u16 gpio, u32 *gpio_val)
3030 enum _ecore_status_t rc = ECORE_SUCCESS;
3031 u32 drv_mb_param = 0, rsp;
3033 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3035 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3036 drv_mb_param, &rsp, gpio_val);
3038 if (rc != ECORE_SUCCESS)
3041 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3042 return ECORE_UNKNOWN_ERROR;
3044 return ECORE_SUCCESS;
3047 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3048 struct ecore_ptt *p_ptt,
3049 u16 gpio, u16 gpio_val)
3051 enum _ecore_status_t rc = ECORE_SUCCESS;
3052 u32 drv_mb_param = 0, param, rsp;
3054 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3055 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3057 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3058 drv_mb_param, &rsp, &param);
3060 if (rc != ECORE_SUCCESS)
3063 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3064 return ECORE_UNKNOWN_ERROR;
3066 return ECORE_SUCCESS;
3069 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3070 struct ecore_ptt *p_ptt,
3071 u16 gpio, u32 *gpio_direction,
3074 u32 drv_mb_param = 0, rsp, val = 0;
3075 enum _ecore_status_t rc = ECORE_SUCCESS;
3077 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3079 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3080 drv_mb_param, &rsp, &val);
3081 if (rc != ECORE_SUCCESS)
3084 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3085 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3086 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3087 DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3089 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3090 return ECORE_UNKNOWN_ERROR;
3092 return ECORE_SUCCESS;
3095 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3096 struct ecore_ptt *p_ptt)
3098 u32 drv_mb_param = 0, rsp, param;
3099 enum _ecore_status_t rc = ECORE_SUCCESS;
3101 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3102 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3104 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3105 drv_mb_param, &rsp, &param);
3107 if (rc != ECORE_SUCCESS)
3110 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3111 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3112 rc = ECORE_UNKNOWN_ERROR;
3117 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3118 struct ecore_ptt *p_ptt)
3120 u32 drv_mb_param, rsp, param;
3121 enum _ecore_status_t rc = ECORE_SUCCESS;
3123 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3124 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3126 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3127 drv_mb_param, &rsp, &param);
3129 if (rc != ECORE_SUCCESS)
3132 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3133 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3134 rc = ECORE_UNKNOWN_ERROR;
3139 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3140 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3142 u32 drv_mb_param = 0, rsp;
3143 enum _ecore_status_t rc = ECORE_SUCCESS;
3145 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3146 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3148 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3149 drv_mb_param, &rsp, num_images);
3151 if (rc != ECORE_SUCCESS)
3154 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3155 rc = ECORE_UNKNOWN_ERROR;
3160 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3161 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3162 struct bist_nvm_image_att *p_image_att, u32 image_index)
3164 u32 buf_size, nvm_offset, resp, param;
3165 enum _ecore_status_t rc;
3167 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3168 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3169 nvm_offset |= (image_index <<
3170 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3171 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3172 nvm_offset, &resp, &param, &buf_size,
3173 (u32 *)p_image_att);
3174 if (rc != ECORE_SUCCESS)
3177 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3178 (p_image_att->return_code != 1))
3179 rc = ECORE_UNKNOWN_ERROR;
3184 enum _ecore_status_t
3185 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3186 struct ecore_ptt *p_ptt,
3187 struct ecore_temperature_info *p_temp_info)
3189 struct ecore_temperature_sensor *p_temp_sensor;
3190 struct temperature_status_stc mfw_temp_info;
3191 struct ecore_mcp_mb_params mb_params;
3193 enum _ecore_status_t rc;
3196 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3197 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3198 mb_params.p_data_dst = &mfw_temp_info;
3199 mb_params.data_dst_size = sizeof(mfw_temp_info);
3200 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3201 if (rc != ECORE_SUCCESS)
3204 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3205 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3206 ECORE_MAX_NUM_OF_SENSORS);
3207 for (i = 0; i < p_temp_info->num_sensors; i++) {
3208 val = mfw_temp_info.sensor[i];
3209 p_temp_sensor = &p_temp_info->sensors[i];
3210 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3211 SENSOR_LOCATION_OFFSET;
3212 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3213 THRESHOLD_HIGH_OFFSET;
3214 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3215 CRITICAL_TEMPERATURE_OFFSET;
3216 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3217 CURRENT_TEMP_OFFSET;
3220 return ECORE_SUCCESS;
3223 enum _ecore_status_t ecore_mcp_get_mba_versions(
3224 struct ecore_hwfn *p_hwfn,
3225 struct ecore_ptt *p_ptt,
3226 struct ecore_mba_vers *p_mba_vers)
3228 u32 buf_size, resp, param;
3229 enum _ecore_status_t rc;
3231 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3232 0, &resp, &param, &buf_size,
3233 &p_mba_vers->mba_vers[0]);
3235 if (rc != ECORE_SUCCESS)
3238 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3239 rc = ECORE_UNKNOWN_ERROR;
3241 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3242 rc = ECORE_UNKNOWN_ERROR;
3247 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3248 struct ecore_ptt *p_ptt,
3253 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3254 0, &rsp, (u32 *)num_events);
3257 static enum resource_id_enum
3258 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3260 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3264 mfw_res_id = RESOURCE_NUM_SB_E;
3266 case ECORE_L2_QUEUE:
3267 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3270 mfw_res_id = RESOURCE_NUM_VPORT_E;
3273 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3276 mfw_res_id = RESOURCE_NUM_PQ_E;
3279 mfw_res_id = RESOURCE_NUM_RL_E;
3283 /* Each VFC resource can accommodate both a MAC and a VLAN */
3284 mfw_res_id = RESOURCE_VFC_FILTER_E;
3287 mfw_res_id = RESOURCE_ILT_E;
3289 case ECORE_LL2_QUEUE:
3290 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3292 case ECORE_RDMA_CNQ_RAM:
3293 case ECORE_CMDQS_CQS:
3294 /* CNQ/CMDQS are the same resource */
3295 mfw_res_id = RESOURCE_CQS_E;
3297 case ECORE_RDMA_STATS_QUEUE:
3298 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3301 mfw_res_id = RESOURCE_BDQ_E;
3310 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3311 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3312 #define ECORE_RESC_ALLOC_VERSION \
3313 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3314 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
3315 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3316 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3318 struct ecore_resc_alloc_in_params {
3320 enum ecore_resources res_id;
3324 struct ecore_resc_alloc_out_params {
3334 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
3336 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3338 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3339 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3340 enum _ecore_status_t rc;
3342 /* Allow ongoing PCIe transactions to complete */
3343 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3345 /* Clear the PF's internal FID_enable in the PXP */
3346 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3347 if (rc != ECORE_SUCCESS)
3348 DP_NOTICE(p_hwfn, false,
3349 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
3355 static enum _ecore_status_t
3356 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3357 struct ecore_ptt *p_ptt,
3358 struct ecore_resc_alloc_in_params *p_in_params,
3359 struct ecore_resc_alloc_out_params *p_out_params)
3361 struct ecore_mcp_mb_params mb_params;
3362 struct resource_info mfw_resc_info;
3363 enum _ecore_status_t rc;
3365 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3367 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3368 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3370 "Failed to match resource %d [%s] with the MFW resources\n",
3371 p_in_params->res_id,
3372 ecore_hw_get_resc_name(p_in_params->res_id));
3376 switch (p_in_params->cmd) {
3377 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3378 mfw_resc_info.size = p_in_params->resc_max_val;
3380 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3383 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3388 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3389 mb_params.cmd = p_in_params->cmd;
3390 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3391 mb_params.p_data_src = &mfw_resc_info;
3392 mb_params.data_src_size = sizeof(mfw_resc_info);
3393 mb_params.p_data_dst = mb_params.p_data_src;
3394 mb_params.data_dst_size = mb_params.data_src_size;
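/* The same buffer serves as both request and response; the MFW overwrites
 * mfw_resc_info with the actual allocation results.
 */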
3396 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3397 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3398 p_in_params->cmd, p_in_params->res_id,
3399 ecore_hw_get_resc_name(p_in_params->res_id),
3400 GET_MFW_FIELD(mb_params.param,
3401 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3402 GET_MFW_FIELD(mb_params.param,
3403 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3404 p_in_params->resc_max_val);
3406 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3407 if (rc != ECORE_SUCCESS)
3410 p_out_params->mcp_resp = mb_params.mcp_resp;
3411 p_out_params->mcp_param = mb_params.mcp_param;
3412 p_out_params->resc_num = mfw_resc_info.size;
3413 p_out_params->resc_start = mfw_resc_info.offset;
3414 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3415 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3416 p_out_params->flags = mfw_resc_info.flags;
3418 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3419 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3420 GET_MFW_FIELD(p_out_params->mcp_param,
3421 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3422 GET_MFW_FIELD(p_out_params->mcp_param,
3423 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3424 p_out_params->resc_num, p_out_params->resc_start,
3425 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3426 p_out_params->flags);
3428 return ECORE_SUCCESS;
3431 enum _ecore_status_t
3432 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3433 enum ecore_resources res_id, u32 resc_max_val,
3436 struct ecore_resc_alloc_out_params out_params;
3437 struct ecore_resc_alloc_in_params in_params;
3438 enum _ecore_status_t rc;
3440 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3441 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3442 in_params.res_id = res_id;
3443 in_params.resc_max_val = resc_max_val;
3444 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3445 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3447 if (rc != ECORE_SUCCESS)
3450 *p_mcp_resp = out_params.mcp_resp;
3452 return ECORE_SUCCESS;
3455 enum _ecore_status_t
3456 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3457 enum ecore_resources res_id, u32 *p_mcp_resp,
3458 u32 *p_resc_num, u32 *p_resc_start)
3460 struct ecore_resc_alloc_out_params out_params;
3461 struct ecore_resc_alloc_in_params in_params;
3462 enum _ecore_status_t rc;
3464 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3465 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3466 in_params.res_id = res_id;
3467 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3468 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3470 if (rc != ECORE_SUCCESS)
3473 *p_mcp_resp = out_params.mcp_resp;
3475 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3476 *p_resc_num = out_params.resc_num;
3477 *p_resc_start = out_params.resc_start;
3480 return ECORE_SUCCESS;
3483 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3484 struct ecore_ptt *p_ptt)
3486 u32 mcp_resp, mcp_param;
3488 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3489 &mcp_resp, &mcp_param);
3492 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3493 struct ecore_ptt *p_ptt,
3494 u32 param, u32 *p_mcp_resp,
3497 enum _ecore_status_t rc;
3499 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3500 p_mcp_resp, p_mcp_param);
3501 if (rc != ECORE_SUCCESS)
3504 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3506 "The resource command is unsupported by the MFW\n");
3507 return ECORE_NOTIMPL;
3510 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3511 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3513 DP_NOTICE(p_hwfn, false,
3514 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3522 enum _ecore_status_t
3523 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3524 struct ecore_resc_lock_params *p_params)
3526 u32 param = 0, mcp_resp, mcp_param;
3528 enum _ecore_status_t rc;
3530 switch (p_params->timeout) {
3531 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3532 opcode = RESOURCE_OPCODE_REQ;
3533 p_params->timeout = 0;
3535 case ECORE_MCP_RESC_LOCK_TO_NONE:
3536 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3537 p_params->timeout = 0;
3540 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3544 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3545 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3546 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
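/* The aging timeout is only meaningful for the aging request opcode; it was
 * zeroed above for the default and no-aging cases.
 */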
3548 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3549 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3550 param, p_params->timeout, opcode, p_params->resource);
3552 /* Attempt to acquire the resource */
3553 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3555 if (rc != ECORE_SUCCESS)
3558 /* Analyze the response */
3559 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3560 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3562 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3563 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3564 mcp_param, opcode, p_params->owner);
3567 case RESOURCE_OPCODE_GNT:
3568 p_params->b_granted = true;
3570 case RESOURCE_OPCODE_BUSY:
3571 p_params->b_granted = false;
3574 DP_NOTICE(p_hwfn, false,
3575 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3580 return ECORE_SUCCESS;
3583 enum _ecore_status_t
3584 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3585 struct ecore_resc_lock_params *p_params)
3588 enum _ecore_status_t rc;
3591 /* No need for an interval before the first iteration */
3593 if (p_params->sleep_b4_retry) {
3594 u16 retry_interval_in_ms =
3595 DIV_ROUND_UP(p_params->retry_interval,
3598 OSAL_MSLEEP(retry_interval_in_ms);
3600 OSAL_UDELAY(p_params->retry_interval);
3604 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3605 if (rc != ECORE_SUCCESS)
3608 if (p_params->b_granted)
3610 } while (retry_cnt++ < p_params->retry_num);
3612 return ECORE_SUCCESS;
3615 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
3616 struct ecore_resc_unlock_params *p_unlock,
3617 enum ecore_resc_lock resource,
3618 bool b_is_permanent)
3620 if (p_lock != OSAL_NULL) {
3621 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
3623 /* Permanent resources don't require aging, and there's no
3624 * point in trying to acquire them more than once, since no
3625 * other entity is expected to release them.
3627 if (b_is_permanent) {
3628 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
3630 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3631 p_lock->retry_interval =
3632 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3633 p_lock->sleep_b4_retry = true;
3636 p_lock->resource = resource;
3639 if (p_unlock != OSAL_NULL) {
3640 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
3641 p_unlock->resource = resource;
3645 enum _ecore_status_t
3646 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3647 struct ecore_resc_unlock_params *p_params)
3649 u32 param = 0, mcp_resp, mcp_param;
3651 enum _ecore_status_t rc;
3653 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3654 : RESOURCE_OPCODE_RELEASE;
3655 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3656 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3658 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3659 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3660 param, opcode, p_params->resource);
3662 /* Attempt to release the resource */
3663 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3665 if (rc != ECORE_SUCCESS)
3668 /* Analyze the response */
3669 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3671 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3672 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3676 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3678 "Resource unlock request for an already released resource [%d]\n",
3679 p_params->resource);
3681 case RESOURCE_OPCODE_RELEASED:
3682 p_params->b_released = true;
3684 case RESOURCE_OPCODE_WRONG_OWNER:
3685 p_params->b_released = false;
3688 DP_NOTICE(p_hwfn, false,
3689 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3694 return ECORE_SUCCESS;
3697 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
3699 return !!(p_hwfn->mcp_info->capabilities &
3700 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3703 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
3704 struct ecore_ptt *p_ptt)
3707 enum _ecore_status_t rc;
3709 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3710 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3711 if (rc == ECORE_SUCCESS)
3712 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
3713 "MFW supported features: %08x\n",
3714 p_hwfn->mcp_info->capabilities);
3719 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
3720 struct ecore_ptt *p_ptt)
3722 u32 mcp_resp, mcp_param, features;
3724 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
3725 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3726 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
3728 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3729 features, &mcp_resp, &mcp_param);
3732 enum _ecore_status_t
3733 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3734 struct ecore_mcp_drv_attr *p_drv_attr)
3736 struct attribute_cmd_write_stc attr_cmd_write;
3737 enum _attribute_commands_e mfw_attr_cmd;
3738 struct ecore_mcp_mb_params mb_params;
3739 enum _ecore_status_t rc;
3741 switch (p_drv_attr->attr_cmd) {
3742 case ECORE_MCP_DRV_ATTR_CMD_READ:
3743 mfw_attr_cmd = ATTRIBUTE_CMD_READ;
3745 case ECORE_MCP_DRV_ATTR_CMD_WRITE:
3746 mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
3748 case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
3749 mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
3751 case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
3752 mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
3755 DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
3756 p_drv_attr->attr_cmd);
3760 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3761 mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
3762 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
3763 p_drv_attr->attr_num);
3764 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
3766 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
3767 OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
3768 attr_cmd_write.val = p_drv_attr->val;
3769 attr_cmd_write.mask = p_drv_attr->mask;
3770 attr_cmd_write.offset = p_drv_attr->offset;
3772 mb_params.p_data_src = &attr_cmd_write;
3773 mb_params.data_src_size = sizeof(attr_cmd_write);
3776 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3777 if (rc != ECORE_SUCCESS)
3780 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3782 "The attribute command is not supported by the MFW\n");
3783 return ECORE_NOTIMPL;
3784 } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3786 "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
3787 mb_params.mcp_resp, p_drv_attr->attr_cmd,
3788 p_drv_attr->attr_num);
3792 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3793 "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
3794 p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
3795 p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
3796 mb_params.mcp_param);
3798 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
3799 p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
3800 p_drv_attr->val = mb_params.mcp_param;
3802 return ECORE_SUCCESS;
3805 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3806 u32 offset, u32 val)
3808 struct ecore_mcp_mb_params mb_params = {0};
3809 enum _ecore_status_t rc = ECORE_SUCCESS;
3812 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
3813 mb_params.param = offset;
3814 mb_params.p_data_src = &dword;
3815 mb_params.data_src_size = sizeof(dword);
3817 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3818 if (rc != ECORE_SUCCESS) {
3819 DP_NOTICE(p_hwfn, false,
3820 "Failed to wol write request, rc = %d\n", rc);
3823 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
3824 DP_NOTICE(p_hwfn, false,
3825 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
3826 val, offset, mb_params.mcp_resp);
3827 rc = ECORE_UNKNOWN_ERROR;