/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
#define GRCBASE_MCP 0xe00000

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
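/* Both budgets above derive from the 10 usec iteration delay:
 * 500,000 iterations * 10 usec = 5 sec, and 50,000 * 10 usec = 500 msec.
 */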
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))
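/* For illustration, a read of the firmware mailbox header,
 *	DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
 * expands to
 *	ecore_rd(p_hwfn, p_ptt, (p_hwfn->mcp_info->drv_mb_addr +
 *		 OFFSETOF(struct public_drv_mb, fw_mb_header)));
 * i.e. every accessor addresses SHMEM relative to this PF's
 * struct public_drv_mb.
 */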
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17
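/* The offset above is used as a shift when converting Mbit to bytes,
 * i.e. bytes = mbit << 17; this implies a "binary" Mbit of 2^20 bits
 * (divided by 8 bits per byte).
 */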
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			OSAL_BE32_TO_CPU(tmp);
	}
}
struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
	if (!p_cmd_elem) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
		goto out;
	}

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
				   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return OSAL_NULL;
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
}
/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES 20
#define ECORE_MCP_SHMEM_RDY_ITER_MS 50

static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* The driver can notify that there was an MCP reset, and read the SHMEM
	 * values before the MFW has completed initializing them.
	 * As a temporary solution, the "sup_msgs" field is used as a data ready
	 * indication.
	 * This should be replaced with an actual indication when it is provided
	 * by the MFW.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		OSAL_MSLEEP(msec);
		p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
						      p_info->mfw_mb_addr);
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return ECORE_TIMEOUT;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
		return ECORE_NOMEM;
	}
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
		OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
		return ECORE_NOMEM;
	}
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}
static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}
/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
						   list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return ECORE_AGAIN;

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
}
/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
				      u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
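/* The write ordering above is deliberate: the union data and the param are
 * placed in SHMEM before drv_mb_header is written, since the MFW detects a
 * new command by the sequence-number change in the header.
 */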
static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 delay)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_UDELAY(delay);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_AGAIN;
	}

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = ECORE_NOMEM;
		goto err;
	}

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		OSAL_UDELAY(delay);
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
		return ECORE_AGAIN;
	}

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

err:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}
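/* To summarize the flow above: a command element is queued under cmd_lock,
 * the command is written to SHMEM with a fresh sequence number, and the
 * caller then polls fw_mb_header until the MFW echoes that sequence number
 * back. On timeout, the mailbox is marked blocked (b_block_cmd) so that
 * subsequent commands fail fast instead of queueing behind an unresponsive
 * MFW.
 */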
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       union_data_size);
		return ECORE_INVAL;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;
	}

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
					delay);
}
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
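/* Illustrative usage of the wrapper above (LOAD_DONE is one of the commands
 * actually sent through it elsewhere in this file):
 *
 *	u32 resp = 0, param = 0;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *			   &resp, &param);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 */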
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

	return ECORE_SUCCESS;
}
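/* Note the clamp above: the MFW reports the actual transaction size in
 * mcp_param, but no more than MCP_DRV_NVM_BUF_LEN bytes are ever copied,
 * so a malformed response cannot overflow the caller's buffer.
 */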
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}

static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}
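/* The default policy, in words: an OS driver may force out a leftover
 * preboot driver, and a kdump driver may force out a crashed OS driver;
 * every other combination must be resolved via the explicit override knob.
 */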
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}
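/* For example, a build with only CONFIG_ECORE_L2 and CONFIG_ECORE_SRIOV
 * defined yields a bitmap of 0x3. The result is reported to the MFW via
 * drv_ver_1 in the load request below.
 */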
struct ecore_load_req_in_params {
	u32 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
#define ECORE_LOAD_REQ_HSI_VER_1 1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}
static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
				   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	}
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
				    u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
						&mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}
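/* Broadly, the terminal outcomes of the exchange above are the success codes
 * handled in the switch - LOAD_ENGINE, LOAD_PORT or LOAD_FUNCTION, indicating
 * whether this PF is the first to load on its engine, on its port, or
 * neither - or a refusal that surfaces to the caller as ECORE_BUSY or
 * ECORE_INVAL.
 */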
enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn, false,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}
static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}
static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
						 MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store the total line speed, since p_link->speed changes
	 * again according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by ecore, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return ECORE_SUCCESS;
}
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return 0;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}
static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}
static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume THERE IS ALWAYS
	 * a limit, and correct the value to min `1' and max `100' if the limit
	 * isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}
static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
				 p_hwfn->hw_info.ovlan);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID,
				 p_hwfn->hw_info.ovlan);
		} else {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);

			/* Configure DB to add external vlan to EDPM packets */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID, 0);
		}

		ecore_sp_pf_update_stag(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		      &resp, &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
struct ecore_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = ECORE_NOTIMPL;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = ECORE_NOTIMPL;
	}

	return rc;
}
static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 epoch)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
	mdump_cmd_params.p_data_src = &epoch;
	mdump_cmd_params.data_src_size = sizeof(epoch);

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct mdump_config_stc *p_mdump_config)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
	mdump_cmd_params.p_data_dst = p_mdump_config;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}
enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mdump_info *p_mdump_info)
{
	u32 addr, global_offsize, global_addr;
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
					global_addr +
					OFFSETOF(struct public_global,
						 mdump_reason));

	if (p_mdump_info->reason) {
		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_mdump_info->version = mdump_config.version;
		p_mdump_info->config = mdump_config.config;
		p_mdump_info->epoch = mdump_config.epoc;
		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
		p_mdump_info->valid_logs = mdump_config.valid_logs;

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
			   p_mdump_info->reason, p_mdump_info->version,
			   p_mdump_info->config, p_mdump_info->epoch,
			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

enum _ecore_status_t
ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mdump_retain_data *p_mdump_retain)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;
	struct mdump_retain_data_stc mfw_mdump_retain;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
	mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
	mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mdump_retain->valid = mfw_mdump_retain.valid;
	p_mdump_retain->epoch = mfw_mdump_retain.epoch;
	p_mdump_retain->pf = mfw_mdump_retain.pf;
	p_mdump_retain->status = mfw_mdump_retain.status;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_retain_data mdump_retain;
	enum _ecore_status_t rc;

	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
	if (rc == ECORE_SUCCESS && mdump_retain.valid) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
			  mdump_retain.epoch, mdump_retain.pf,
			  mdump_retain.status);
	} else {
		DP_NOTICE(p_hwfn, false,
			  "The MFW notified that a critical error occurred in the device\n");
	}

	if (p_hwfn->p_dev->allow_mdump) {
		DP_NOTICE(p_hwfn, false,
			  "Not acknowledging the notification to allow the MFW crash dump\n");
		return;
	}

	DP_NOTICE(p_hwfn, false,
		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
1980 void
1981 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1983 struct public_func shmem_info;
1984 u32 port_cfg, val;
1986 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
1989 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1990 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1991 OFFSETOF(struct public_port, oem_cfg_port));
1992 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
1993 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1994 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
1997 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
1998 if (val == OEM_CFG_SCHED_TYPE_ETS)
1999 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
2000 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
2001 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
2003 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
2006 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2007 MCP_PF_ID(p_hwfn));
2008 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
2009 p_hwfn->ufp_info.tc = (u8)val;
2010 val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
2011 OEM_CFG_FUNC_HOST_PRI_CTRL);
2012 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
2013 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
2014 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
2015 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
2017 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
2020 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2021 "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
2022 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
2023 p_hwfn->ufp_info.pri_type);
2026 static enum _ecore_status_t
2027 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2029 ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
2031 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
2032 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
2033 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
2035 ecore_qm_reconf(p_hwfn, p_ptt);
2037 /* Merge UFP TC with the dcbx TC data */
2038 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2039 ECORE_DCBX_OPERATIONAL_MIB);
2042 /* update storm FW with negotiation results */
2043 ecore_sp_pf_update_ufp(p_hwfn);
2045 return ECORE_SUCCESS;
2048 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
2049 struct ecore_ptt *p_ptt)
2051 struct ecore_mcp_info *info = p_hwfn->mcp_info;
2052 enum _ecore_status_t rc = ECORE_SUCCESS;
2053 bool found = false;
2054 u16 i;
2056 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
2058 /* Read Messages from MFW */
2059 ecore_mcp_read_mb(p_hwfn, p_ptt);
2061 /* Compare current messages to old ones */
2062 for (i = 0; i < info->mfw_mb_length; i++) {
2063 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
2068 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2069 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
2070 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2073 case MFW_DRV_MSG_LINK_CHANGE:
2074 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
2076 case MFW_DRV_MSG_VF_DISABLED:
2077 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
2079 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2080 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2081 ECORE_DCBX_REMOTE_LLDP_MIB);
2083 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2084 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2085 ECORE_DCBX_REMOTE_MIB);
2087 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2088 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2089 ECORE_DCBX_OPERATIONAL_MIB);
2090 /* clear the user-config cache */
2091 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
2092 sizeof(struct ecore_dcbx_set));
2094 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
2095 ecore_lldp_mib_update_event(p_hwfn, p_ptt);
2097 case MFW_DRV_MSG_OEM_CFG_UPDATE:
2098 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
2100 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2101 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2103 case MFW_DRV_MSG_ERROR_RECOVERY:
2104 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2106 case MFW_DRV_MSG_GET_LAN_STATS:
2107 case MFW_DRV_MSG_GET_FCOE_STATS:
2108 case MFW_DRV_MSG_GET_ISCSI_STATS:
2109 case MFW_DRV_MSG_GET_RDMA_STATS:
2110 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2112 case MFW_DRV_MSG_BW_UPDATE:
2113 ecore_mcp_update_bw(p_hwfn, p_ptt);
2115 case MFW_DRV_MSG_S_TAG_UPDATE:
2116 ecore_mcp_update_stag(p_hwfn, p_ptt);
2118 case MFW_DRV_MSG_FAILURE_DETECTED:
2119 ecore_mcp_handle_fan_failure(p_hwfn);
2121 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2122 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2125 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2130 /* ACK everything */
2131 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2132 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2134 /* MFW expect answer in BE, so we force write in that format */
2135 ecore_wr(p_hwfn, p_ptt,
2136 info->mfw_mb_addr + sizeof(u32) +
2137 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2138 sizeof(u32) + i * sizeof(u32), val);
2142 DP_NOTICE(p_hwfn, false,
2143 "Received an MFW message indication but no"
2144 " new message\n");
2148 /* Copy the new mfw messages into the shadow */
2149 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
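/* Illustrative sketch (not part of the driver flow): the typical call site
 * for ecore_mcp_handle_events() is the slowpath context that reacts to an
 * MCP attention; the wrapper below is an assumption for the example.
 */
#if 0
static void example_service_mfw_attn(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	/* Reads the mailbox, dispatches per-message handlers and ACKs all;
	 * a non-success return usually means the indication was spurious.
	 */
	if (ecore_mcp_handle_events(p_hwfn, p_ptt) != ECORE_SUCCESS)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Spurious MFW indication\n");
}
#endif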
2154 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2155 struct ecore_ptt *p_ptt,
2156 u32 *p_mfw_ver,
2157 u32 *p_running_bundle_id)
2162 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2163 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2164 return ECORE_SUCCESS;
2168 if (IS_VF(p_hwfn->p_dev)) {
2169 if (p_hwfn->vf_iov_info) {
2170 struct pfvf_acquire_resp_tlv *p_resp;
2172 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2173 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2174 return ECORE_SUCCESS;
2176 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2177 "VF requested MFW version prior to ACQUIRE\n");
2182 global_offsize = ecore_rd(p_hwfn, p_ptt,
2183 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
2184 public_base,
2185 PUBLIC_GLOBAL));
2186 *p_mfw_ver =
2187 ecore_rd(p_hwfn, p_ptt,
2188 SECTION_ADDR(global_offsize,
2189 0) + OFFSETOF(struct public_global, mfw_ver));
2191 if (p_running_bundle_id != OSAL_NULL) {
2192 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2193 SECTION_ADDR(global_offsize,
2195 OFFSETOF(struct public_global,
2196 running_bundle_id));
2199 return ECORE_SUCCESS;
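/* Illustrative sketch (not part of the driver flow): decoding the packed
 * 32-bit MFW version, assuming the conventional one-byte-per-field
 * major.minor.rev.eng layout.
 */
#if 0
static void example_print_mfw_ver(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	u32 mfw_ver = 0;

	if (ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver,
				  OSAL_NULL) != ECORE_SUCCESS)
		return;

	DP_INFO(p_hwfn, "MFW version %d.%d.%d.%d\n",
		(mfw_ver >> 24) & 0xff, (mfw_ver >> 16) & 0xff,
		(mfw_ver >> 8) & 0xff, mfw_ver & 0xff);
}
#endif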
2202 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2203 struct ecore_ptt *p_ptt,
2204 u32 *p_media_type)
2206 enum _ecore_status_t rc = ECORE_SUCCESS;
2208 /* TODO - Add support for VFs */
2209 if (IS_VF(p_hwfn->p_dev))
2212 if (!ecore_mcp_is_init(p_hwfn)) {
2213 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2218 *p_media_type = MEDIA_UNSPECIFIED;
2221 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2222 p_hwfn->mcp_info->port_addr +
2223 OFFSETOF(struct public_port,
2227 return ECORE_SUCCESS;
2230 enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
2231 struct ecore_ptt *p_ptt,
2232 u32 *p_transceiver_state,
2233 u32 *p_transceiver_type)
2235 u32 transceiver_info;
2236 enum _ecore_status_t rc = ECORE_SUCCESS;
2238 /* TODO - Add support for VFs */
2239 if (IS_VF(p_hwfn->p_dev))
2242 if (!ecore_mcp_is_init(p_hwfn)) {
2243 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2247 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2248 *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
2250 transceiver_info = ecore_rd(p_hwfn, p_ptt,
2251 p_hwfn->mcp_info->port_addr +
2252 offsetof(struct public_port,
2255 *p_transceiver_state = GET_MFW_FIELD(transceiver_info,
2256 ETH_TRANSCEIVER_STATE);
2258 if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) {
2259 *p_transceiver_type = GET_MFW_FIELD(transceiver_info,
2260 ETH_TRANSCEIVER_TYPE);
2262 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2268 static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
2270 if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2271 ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2272 (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2278 enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
2279 struct ecore_ptt *p_ptt,
2280 u32 *p_speed_mask)
2282 u32 transceiver_type, transceiver_state;
2284 ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2288 if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
2291 switch (transceiver_type) {
2292 case ETH_TRANSCEIVER_TYPE_1G_LX:
2293 case ETH_TRANSCEIVER_TYPE_1G_SX:
2294 case ETH_TRANSCEIVER_TYPE_1G_PCC:
2295 case ETH_TRANSCEIVER_TYPE_1G_ACC:
2296 case ETH_TRANSCEIVER_TYPE_1000BASET:
2297 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2300 case ETH_TRANSCEIVER_TYPE_10G_SR:
2301 case ETH_TRANSCEIVER_TYPE_10G_LR:
2302 case ETH_TRANSCEIVER_TYPE_10G_LRM:
2303 case ETH_TRANSCEIVER_TYPE_10G_ER:
2304 case ETH_TRANSCEIVER_TYPE_10G_PCC:
2305 case ETH_TRANSCEIVER_TYPE_10G_ACC:
2306 case ETH_TRANSCEIVER_TYPE_4x10G:
2307 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2310 case ETH_TRANSCEIVER_TYPE_40G_LR4:
2311 case ETH_TRANSCEIVER_TYPE_40G_SR4:
2312 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2313 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2314 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2315 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2318 case ETH_TRANSCEIVER_TYPE_100G_AOC:
2319 case ETH_TRANSCEIVER_TYPE_100G_SR4:
2320 case ETH_TRANSCEIVER_TYPE_100G_LR4:
2321 case ETH_TRANSCEIVER_TYPE_100G_ER4:
2322 case ETH_TRANSCEIVER_TYPE_100G_ACC:
2324 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2325 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2328 case ETH_TRANSCEIVER_TYPE_25G_SR:
2329 case ETH_TRANSCEIVER_TYPE_25G_LR:
2330 case ETH_TRANSCEIVER_TYPE_25G_AOC:
2331 case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2332 case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2333 case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2334 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2337 case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2338 case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2339 case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2340 case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2341 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2342 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2343 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2346 case ETH_TRANSCEIVER_TYPE_40G_CR4:
2347 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2348 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2349 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2350 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2353 case ETH_TRANSCEIVER_TYPE_100G_CR4:
2354 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2356 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2357 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2358 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2359 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2360 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2361 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2362 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2365 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2366 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2367 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2369 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2370 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2371 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2372 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2375 case ETH_TRANSCEIVER_TYPE_XLPPI:
2376 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2379 case ETH_TRANSCEIVER_TYPE_10G_BASET:
2380 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2381 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2385 DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2387 *p_speed_mask = 0xff;
2391 return ECORE_SUCCESS;
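/* Illustrative sketch (not part of the driver flow): a caller would
 * typically intersect the transceiver-derived mask with the speeds it
 * intends to advertise, so an unsupported speed is never requested.
 */
#if 0
static u32 example_limit_adv_speeds(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u32 requested_mask)
{
	u32 xcvr_mask = 0xff;

	ecore_mcp_trans_speed_mask(p_hwfn, p_ptt, &xcvr_mask);

	/* Keep only the speeds both the user and the module support */
	return requested_mask & xcvr_mask;
}
#endif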
2394 enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
2395 struct ecore_ptt *p_ptt,
2396 u32 *p_board_config)
2398 u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2399 enum _ecore_status_t rc = ECORE_SUCCESS;
2401 /* TODO - Add support for VFs */
2402 if (IS_VF(p_hwfn->p_dev))
2405 if (!ecore_mcp_is_init(p_hwfn)) {
2406 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2410 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2413 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
2414 MISC_REG_GEN_PURP_CR0);
2415 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
2417 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2418 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2419 *p_board_config = ecore_rd(p_hwfn, p_ptt,
2421 offsetof(struct nvm_cfg1_port,
2429 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2431 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2432 enum ecore_pci_personality *p_proto)
2434 *p_proto = ECORE_PCI_ETH;
2436 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2437 "According to Legacy capabilities, L2 personality is %08x\n",
2442 static enum _ecore_status_t
2443 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2444 struct ecore_ptt *p_ptt,
2445 enum ecore_pci_personality *p_proto)
2447 u32 resp = 0, param = 0;
2448 enum _ecore_status_t rc;
2450 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2451 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2452 (u32)*p_proto, resp, param);
2453 return ECORE_SUCCESS;
2456 static enum _ecore_status_t
2457 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2458 struct public_func *p_info,
2459 struct ecore_ptt *p_ptt,
2460 enum ecore_pci_personality *p_proto)
2462 enum _ecore_status_t rc = ECORE_SUCCESS;
2464 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2465 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2466 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2467 ECORE_SUCCESS)
2468 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2477 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2478 struct ecore_ptt *p_ptt)
2480 struct ecore_mcp_function_info *info;
2481 struct public_func shmem_info;
2483 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2484 info = &p_hwfn->mcp_info->func_info;
2486 info->pause_on_host = (shmem_info.config &
2487 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2489 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2490 &info->protocol) != ECORE_SUCCESS) {
2491 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2492 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2496 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2498 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2499 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2500 info->mac[1] = (u8)(shmem_info.mac_upper);
2501 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2502 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2503 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2504 info->mac[5] = (u8)(shmem_info.mac_lower);
2506 /* TODO - are there protocols for which there's no MAC? */
2507 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2510 /* TODO - are these calculations true for BE machine? */
2511 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2512 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2513 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2514 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2516 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2518 info->mtu = (u16)shmem_info.mtu_size;
2523 info->mtu = (u16)shmem_info.mtu_size;
2525 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2526 "Read configuration from shmem: pause_on_host %02x"
2527 " protocol %02x BW [%02x - %02x]"
2528 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
2529 " node %lx ovlan %04x\n",
2530 info->pause_on_host, info->protocol,
2531 info->bandwidth_min, info->bandwidth_max,
2532 info->mac[0], info->mac[1], info->mac[2],
2533 info->mac[3], info->mac[4], info->mac[5],
2534 (unsigned long)info->wwn_port,
2535 (unsigned long)info->wwn_node, info->ovlan);
2537 return ECORE_SUCCESS;
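/* A worked example of the shmem MAC unpacking above: with
 * mac_upper = 0x0102 and mac_lower = 0x03040506, the resulting address is
 * 01:02:03:04:05:06 - mac_upper carries the two most significant octets.
 */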
2540 struct ecore_mcp_link_params
2541 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2543 if (!p_hwfn || !p_hwfn->mcp_info)
2545 return &p_hwfn->mcp_info->link_input;
2548 struct ecore_mcp_link_state
2549 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2551 if (!p_hwfn || !p_hwfn->mcp_info)
2555 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2556 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2557 p_hwfn->mcp_info->link_output.link_up = true;
2561 return &p_hwfn->mcp_info->link_output;
2564 struct ecore_mcp_link_capabilities
2565 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2567 if (!p_hwfn || !p_hwfn->mcp_info)
2569 return &p_hwfn->mcp_info->link_capabilities;
2572 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2573 struct ecore_ptt *p_ptt)
2575 u32 resp = 0, param = 0;
2576 enum _ecore_status_t rc;
2578 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2579 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2581 /* Wait for the drain to complete before returning */
2587 const struct ecore_mcp_function_info
2588 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2590 if (!p_hwfn || !p_hwfn->mcp_info)
2592 return &p_hwfn->mcp_info->func_info;
2595 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2596 struct ecore_ptt *p_ptt, u32 personalities)
2598 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2599 struct public_func shmem_info;
2600 int i, count = 0, num_pfs;
2602 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2604 for (i = 0; i < num_pfs; i++) {
2605 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2606 MCP_PF_ID_BY_REL(p_hwfn, i));
2607 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2610 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2611 &protocol) != ECORE_SUCCESS)
2612 continue;
2615 if ((1 << ((u32)protocol)) & personalities)
2622 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2623 struct ecore_ptt *p_ptt,
2624 u32 *p_flash_size)
2629 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2630 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2635 if (IS_VF(p_hwfn->p_dev))
2638 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2639 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2640 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2641 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2643 *p_flash_size = flash_size;
2645 return ECORE_SUCCESS;
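/* A worked example of the flash-size computation above: the register field
 * encodes the size as a power-of-two count of megabits, and adding
 * MCP_BYTES_PER_MBIT_OFFSET (17) converts that to bytes. A field value of 7
 * therefore yields 1 << (7 + 17) = 16 MiB.
 */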
2648 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2649 struct ecore_ptt *p_ptt)
2651 struct ecore_dev *p_dev = p_hwfn->p_dev;
2653 if (p_dev->recov_in_prog) {
2654 DP_NOTICE(p_hwfn, false,
2655 "Avoid triggering a recovery since such a process"
2656 " is already in progress\n");
2660 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2661 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2663 return ECORE_SUCCESS;
2666 static enum _ecore_status_t
2667 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2668 struct ecore_ptt *p_ptt,
2671 u32 resp = 0, param = 0, rc_param = 0;
2672 enum _ecore_status_t rc;
2674 /* Only Leader can configure MSIX, and need to take CMT into account */
2676 if (!IS_LEAD_HWFN(p_hwfn))
2677 return ECORE_SUCCESS;
2678 num *= p_hwfn->p_dev->num_hwfns;
2680 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2681 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2682 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2683 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2685 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2688 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2689 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2693 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2694 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2701 static enum _ecore_status_t
2702 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2703 struct ecore_ptt *p_ptt,
2706 u32 resp = 0, param = num, rc_param = 0;
2707 enum _ecore_status_t rc;
2709 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2710 param, &resp, &rc_param);
2712 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2713 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2716 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2717 "Requested 0x%02x MSI-x interrupts for VFs\n",
2724 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2725 struct ecore_ptt *p_ptt,
2728 if (ECORE_IS_BB(p_hwfn->p_dev))
2729 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2731 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2734 enum _ecore_status_t
2735 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2736 struct ecore_mcp_drv_version *p_ver)
2738 struct ecore_mcp_mb_params mb_params;
2739 struct drv_version_stc drv_version;
2740 u32 num_words, i;
2741 void *p_name;
2742 OSAL_BE32 val;
2743 enum _ecore_status_t rc;
2746 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2747 return ECORE_SUCCESS;
2750 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2751 drv_version.version = p_ver->version;
2752 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2753 for (i = 0; i < num_words; i++) {
2754 /* The driver name is expected to be in a big-endian format */
2755 p_name = &p_ver->name[i * sizeof(u32)];
2756 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2757 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2760 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2761 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2762 mb_params.p_data_src = &drv_version;
2763 mb_params.data_src_size = sizeof(drv_version);
2764 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2765 if (rc != ECORE_SUCCESS)
2766 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
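/* Illustrative sketch (not part of the driver flow): populating the
 * structure consumed by ecore_mcp_send_drv_version(). The byte-per-field
 * packing of 'version' and the name string are assumptions made for the
 * example.
 */
#if 0
static void example_report_drv_version(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_drv_version drv_ver;

	OSAL_MEM_ZERO(&drv_ver, sizeof(drv_ver));
	drv_ver.version = (8 << 24) | (37 << 16) | (0 << 8) | 0;
	OSAL_MEMCPY(drv_ver.name, "example-drv", sizeof("example-drv"));

	ecore_mcp_send_drv_version(p_hwfn, p_ptt, &drv_ver);
}
#endif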
2771 /* A maximal 100 msec waiting time for the MCP to halt */
2772 #define ECORE_MCP_HALT_SLEEP_MS 10
2773 #define ECORE_MCP_HALT_MAX_RETRIES 10
2775 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2776 struct ecore_ptt *p_ptt)
2778 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2779 enum _ecore_status_t rc;
2781 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2783 if (rc != ECORE_SUCCESS) {
2784 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2789 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2790 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2791 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2793 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2795 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2796 DP_NOTICE(p_hwfn, false,
2797 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2798 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2802 ecore_mcp_cmd_set_blocking(p_hwfn, true);
2804 return ECORE_SUCCESS;
2807 #define ECORE_MCP_RESUME_SLEEP_MS 10
2809 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2810 struct ecore_ptt *p_ptt)
2812 u32 cpu_mode, cpu_state;
2814 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2816 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2817 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2818 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2820 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2821 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2823 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2824 DP_NOTICE(p_hwfn, false,
2825 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2826 cpu_mode, cpu_state);
2830 ecore_mcp_cmd_set_blocking(p_hwfn, false);
2832 return ECORE_SUCCESS;
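/* Illustrative sketch (not part of the driver flow): halt/resume are meant
 * to bracket work that must not race with MCP execution; the body in the
 * middle is a placeholder.
 */
#if 0
static enum _ecore_status_t example_with_mcp_halted(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_halt(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ... work that requires the MCP to be soft-halted ... */

	return ecore_mcp_resume(p_hwfn, p_ptt);
}
#endif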
2835 enum _ecore_status_t
2836 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2837 struct ecore_ptt *p_ptt,
2838 enum ecore_ov_client client)
2840 u32 resp = 0, param = 0;
2842 enum _ecore_status_t rc;
2845 case ECORE_OV_CLIENT_DRV:
2846 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2848 case ECORE_OV_CLIENT_USER:
2849 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2851 case ECORE_OV_CLIENT_VENDOR_SPEC:
2852 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2855 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2859 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2860 drv_mb_param, &resp, &param);
2861 if (rc != ECORE_SUCCESS)
2862 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2867 enum _ecore_status_t
2868 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2869 struct ecore_ptt *p_ptt,
2870 enum ecore_ov_driver_state drv_state)
2872 u32 resp = 0, param = 0;
2874 enum _ecore_status_t rc;
2876 switch (drv_state) {
2877 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2878 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2880 case ECORE_OV_DRIVER_STATE_DISABLED:
2881 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2883 case ECORE_OV_DRIVER_STATE_ACTIVE:
2884 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2887 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2891 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2892 drv_mb_param, &resp, &param);
2893 if (rc != ECORE_SUCCESS)
2894 DP_ERR(p_hwfn, "Failed to send driver state\n");
2899 enum _ecore_status_t
2900 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2901 struct ecore_fc_npiv_tbl *p_table)
2906 enum _ecore_status_t
2907 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2910 u32 resp = 0, param = 0, drv_mb_param = 0;
2911 enum _ecore_status_t rc;
2913 SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu);
2914 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2915 drv_mb_param, &resp, &param);
2916 if (rc != ECORE_SUCCESS)
2917 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2922 enum _ecore_status_t
2923 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2926 struct ecore_mcp_mb_params mb_params;
2927 union drv_union_data union_data;
2928 enum _ecore_status_t rc;
2930 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2931 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2932 SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE,
2933 DRV_MSG_CODE_VMAC_TYPE_MAC);
2934 mb_params.param |= MCP_PF_ID(p_hwfn);
2935 OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN);
2936 mb_params.p_data_src = &union_data;
2937 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2938 if (rc != ECORE_SUCCESS)
2939 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2944 enum _ecore_status_t
2945 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2946 enum ecore_ov_eswitch eswitch)
2948 enum _ecore_status_t rc;
2949 u32 resp = 0, param = 0;
2953 case ECORE_OV_ESWITCH_NONE:
2954 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2956 case ECORE_OV_ESWITCH_VEB:
2957 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2959 case ECORE_OV_ESWITCH_VEPA:
2960 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2963 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2967 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2968 drv_mb_param, &resp, &param);
2969 if (rc != ECORE_SUCCESS)
2970 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2975 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2976 struct ecore_ptt *p_ptt,
2977 enum ecore_led_mode mode)
2979 u32 resp = 0, param = 0, drv_mb_param;
2980 enum _ecore_status_t rc;
2983 case ECORE_LED_MODE_ON:
2984 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2986 case ECORE_LED_MODE_OFF:
2987 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2989 case ECORE_LED_MODE_RESTORE:
2990 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2993 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2997 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2998 drv_mb_param, &resp, &param);
2999 if (rc != ECORE_SUCCESS)
3000 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
3005 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
3006 struct ecore_ptt *p_ptt,
3009 u32 resp = 0, param = 0;
3010 enum _ecore_status_t rc;
3012 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
3013 mask_parities, &resp, &param);
3015 if (rc != ECORE_SUCCESS) {
3017 "MCP response failure for mask parities, aborting\n");
3018 } else if (resp != FW_MSG_CODE_OK) {
3020 "MCP did not ack mask parity request. Old MFW?\n");
3027 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
3028 u8 *p_buf, u32 len)
3030 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3031 u32 bytes_left, offset, bytes_to_copy, buf_size;
3032 u32 nvm_offset, resp, param;
3033 struct ecore_ptt *p_ptt;
3034 enum _ecore_status_t rc = ECORE_SUCCESS;
3036 p_ptt = ecore_ptt_acquire(p_hwfn);
3042 while (bytes_left > 0) {
3043 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3044 MCP_DRV_NVM_BUF_LEN);
3045 nvm_offset = (addr + offset) | (bytes_to_copy <<
3046 DRV_MB_PARAM_NVM_LEN_OFFSET);
3047 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3048 DRV_MSG_CODE_NVM_READ_NVRAM,
3049 nvm_offset, &resp, &param, &buf_size,
3050 (u32 *)(p_buf + offset));
3051 if (rc != ECORE_SUCCESS) {
3052 DP_NOTICE(p_dev, false,
3053 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
3055 resp = FW_MSG_CODE_ERROR;
3059 if (resp != FW_MSG_CODE_NVM_OK) {
3060 DP_NOTICE(p_dev, false,
3061 "nvm read failed, resp = 0x%08x\n", resp);
3062 rc = ECORE_UNKNOWN_ERROR;
3066 /* This can be a lengthy process, and it's possible the scheduler
3067 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3069 if (bytes_left % 0x1000 <
3070 (bytes_left - buf_size) % 0x1000)
3074 bytes_left -= buf_size;
3077 p_dev->mcp_nvm_resp = resp;
3078 ecore_ptt_release(p_hwfn, p_ptt);
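/* Illustrative sketch (not part of the driver flow): reading a blob out of
 * NVRAM. The address and length below are placeholders; real callers derive
 * them from the NVM image layout.
 */
#if 0
static enum _ecore_status_t example_nvm_read(struct ecore_dev *p_dev,
					     u8 *p_buf)
{
	/* Internally chunked into MCP_DRV_NVM_BUF_LEN transactions */
	return ecore_mcp_nvm_read(p_dev, 0x0 /* addr */, p_buf,
				  0x1000 /* len */);
}
#endif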
3083 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
3084 u32 addr, u8 *p_buf, u32 *p_len)
3086 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3087 struct ecore_ptt *p_ptt;
3088 u32 resp = 0, param;
3089 enum _ecore_status_t rc;
3091 p_ptt = ecore_ptt_acquire(p_hwfn);
3095 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3096 (cmd == ECORE_PHY_CORE_READ) ?
3097 DRV_MSG_CODE_PHY_CORE_READ :
3098 DRV_MSG_CODE_PHY_RAW_READ,
3099 addr, &resp, &param, p_len, (u32 *)p_buf);
3100 if (rc != ECORE_SUCCESS)
3101 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3103 p_dev->mcp_nvm_resp = resp;
3104 ecore_ptt_release(p_hwfn, p_ptt);
3109 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
3111 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3112 struct ecore_ptt *p_ptt;
3114 p_ptt = ecore_ptt_acquire(p_hwfn);
3118 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
3119 ecore_ptt_release(p_hwfn, p_ptt);
3121 return ECORE_SUCCESS;
3124 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
3126 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3127 struct ecore_ptt *p_ptt;
3128 u32 resp = 0, param;
3129 enum _ecore_status_t rc;
3131 p_ptt = ecore_ptt_acquire(p_hwfn);
3134 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
3136 p_dev->mcp_nvm_resp = resp;
3137 ecore_ptt_release(p_hwfn, p_ptt);
3142 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
3145 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3146 struct ecore_ptt *p_ptt;
3147 u32 resp = 0, param;
3148 enum _ecore_status_t rc;
3150 p_ptt = ecore_ptt_acquire(p_hwfn);
3153 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
3155 p_dev->mcp_nvm_resp = resp;
3156 ecore_ptt_release(p_hwfn, p_ptt);
3161 /* rc is initialized to ECORE_INVAL because the while loop below is
3162 * never entered when len is 0.
3163 */
3164 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3165 u32 addr, u8 *p_buf, u32 len)
3167 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
3168 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3169 enum _ecore_status_t rc = ECORE_INVAL;
3170 struct ecore_ptt *p_ptt;
3172 p_ptt = ecore_ptt_acquire(p_hwfn);
3177 case ECORE_PUT_FILE_DATA:
3178 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3180 case ECORE_NVM_WRITE_NVRAM:
3181 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3183 case ECORE_EXT_PHY_FW_UPGRADE:
3184 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3187 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3194 while (buf_idx < len) {
3195 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3196 MCP_DRV_NVM_BUF_LEN);
3197 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
3200 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3201 &resp, &param, buf_size,
3202 (u32 *)&p_buf[buf_idx]);
3203 if (rc != ECORE_SUCCESS) {
3204 DP_NOTICE(p_dev, false,
3205 "ecore_mcp_nvm_write() failed, rc = %d\n",
3207 resp = FW_MSG_CODE_ERROR;
3211 if (resp != FW_MSG_CODE_OK &&
3212 resp != FW_MSG_CODE_NVM_OK &&
3213 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3214 DP_NOTICE(p_dev, false,
3215 "nvm write failed, resp = 0x%08x\n", resp);
3216 rc = ECORE_UNKNOWN_ERROR;
3220 /* This can be a lengthy process, and it's possible the scheduler
3221 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3223 if (buf_idx % 0x1000 >
3224 (buf_idx + buf_size) % 0x1000)
3227 buf_idx += buf_size;
3230 p_dev->mcp_nvm_resp = resp;
3232 ecore_ptt_release(p_hwfn, p_ptt);
3237 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3238 u32 addr, u8 *p_buf, u32 len)
3240 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3241 u32 resp = 0, param, nvm_cmd;
3242 struct ecore_ptt *p_ptt;
3243 enum _ecore_status_t rc;
3245 p_ptt = ecore_ptt_acquire(p_hwfn);
3249 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
3250 DRV_MSG_CODE_PHY_RAW_WRITE;
3251 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
3252 &resp, &param, len, (u32 *)p_buf);
3253 if (rc != ECORE_SUCCESS)
3254 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3255 p_dev->mcp_nvm_resp = resp;
3256 ecore_ptt_release(p_hwfn, p_ptt);
3261 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3264 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3265 struct ecore_ptt *p_ptt;
3267 enum _ecore_status_t rc;
3269 p_ptt = ecore_ptt_acquire(p_hwfn);
3273 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
3275 p_dev->mcp_nvm_resp = resp;
3276 ecore_ptt_release(p_hwfn, p_ptt);
3281 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3282 struct ecore_ptt *p_ptt,
3283 u32 port, u32 addr, u32 offset,
3284 u32 len, u8 *p_buf)
3286 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
3288 enum _ecore_status_t rc;
3290 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3291 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3295 while (bytes_left > 0) {
3296 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3297 MAX_I2C_TRANSACTION_SIZE);
3298 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3299 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3300 nvm_offset |= ((addr + offset) <<
3301 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3302 nvm_offset |= (bytes_to_copy <<
3303 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3304 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3305 DRV_MSG_CODE_TRANSCEIVER_READ,
3306 nvm_offset, &resp, &param, &buf_size,
3307 (u32 *)(p_buf + offset));
3308 if (rc != ECORE_SUCCESS) {
3309 DP_NOTICE(p_hwfn, false,
3310 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3315 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3317 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3318 return ECORE_UNKNOWN_ERROR;
3321 bytes_left -= buf_size;
3324 return ECORE_SUCCESS;
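/* Illustrative sketch (not part of the driver flow): dumping the start of a
 * module's EEPROM. I2C address 0xa0 and a 32-byte read follow the common
 * SFP convention, but are assumptions here rather than API requirements.
 */
#if 0
static enum _ecore_status_t example_sfp_dump(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u8 *p_buf)
{
	return ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, MFW_PORT(p_hwfn),
				      0xa0, 0, 32, p_buf);
}
#endif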
3327 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3328 struct ecore_ptt *p_ptt,
3329 u32 port, u32 addr, u32 offset,
3330 u32 len, u8 *p_buf)
3332 u32 buf_idx, buf_size, nvm_offset, resp, param;
3333 enum _ecore_status_t rc;
3335 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3336 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3338 while (buf_idx < len) {
3339 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3340 MAX_I2C_TRANSACTION_SIZE);
3341 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3342 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3343 nvm_offset |= ((offset + buf_idx) <<
3344 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3345 nvm_offset |= (buf_size <<
3346 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3347 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3348 DRV_MSG_CODE_TRANSCEIVER_WRITE,
3349 nvm_offset, &resp, &param, buf_size,
3350 (u32 *)&p_buf[buf_idx]);
3351 if (rc != ECORE_SUCCESS) {
3352 DP_NOTICE(p_hwfn, false,
3353 "Failed to send a transceiver write command to the MFW. rc = %d.\n",
3358 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3360 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3361 return ECORE_UNKNOWN_ERROR;
3363 buf_idx += buf_size;
3366 return ECORE_SUCCESS;
3369 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3370 struct ecore_ptt *p_ptt,
3371 u16 gpio, u32 *gpio_val)
3373 enum _ecore_status_t rc = ECORE_SUCCESS;
3374 u32 drv_mb_param = 0, rsp;
3376 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3378 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3379 drv_mb_param, &rsp, gpio_val);
3381 if (rc != ECORE_SUCCESS)
3384 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3385 return ECORE_UNKNOWN_ERROR;
3387 return ECORE_SUCCESS;
3390 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3391 struct ecore_ptt *p_ptt,
3392 u16 gpio, u16 gpio_val)
3394 enum _ecore_status_t rc = ECORE_SUCCESS;
3395 u32 drv_mb_param = 0, param, rsp;
3397 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3398 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3400 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3401 drv_mb_param, &rsp, &param);
3403 if (rc != ECORE_SUCCESS)
3406 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3407 return ECORE_UNKNOWN_ERROR;
3409 return ECORE_SUCCESS;
3412 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3413 struct ecore_ptt *p_ptt,
3414 u16 gpio, u32 *gpio_direction,
3417 u32 drv_mb_param = 0, rsp, val = 0;
3418 enum _ecore_status_t rc = ECORE_SUCCESS;
3420 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3422 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3423 drv_mb_param, &rsp, &val);
3424 if (rc != ECORE_SUCCESS)
3427 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3428 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3429 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3430 DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3432 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3433 return ECORE_UNKNOWN_ERROR;
3435 return ECORE_SUCCESS;
3438 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3439 struct ecore_ptt *p_ptt)
3441 u32 drv_mb_param = 0, rsp, param;
3442 enum _ecore_status_t rc = ECORE_SUCCESS;
3444 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3445 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3447 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3448 drv_mb_param, &rsp, &param);
3450 if (rc != ECORE_SUCCESS)
3453 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3454 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3455 rc = ECORE_UNKNOWN_ERROR;
3460 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3461 struct ecore_ptt *p_ptt)
3463 u32 drv_mb_param, rsp, param;
3464 enum _ecore_status_t rc = ECORE_SUCCESS;
3466 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3467 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3469 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3470 drv_mb_param, &rsp, &param);
3472 if (rc != ECORE_SUCCESS)
3475 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3476 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3477 rc = ECORE_UNKNOWN_ERROR;
3482 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3483 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3485 u32 drv_mb_param = 0, rsp;
3486 enum _ecore_status_t rc = ECORE_SUCCESS;
3488 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3489 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3491 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3492 drv_mb_param, &rsp, num_images);
3494 if (rc != ECORE_SUCCESS)
3497 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
3498 rc = ECORE_UNKNOWN_ERROR;
3503 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3504 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3505 struct bist_nvm_image_att *p_image_att, u32 image_index)
3507 u32 buf_size, nvm_offset, resp, param;
3508 enum _ecore_status_t rc;
3510 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3511 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3512 nvm_offset |= (image_index <<
3513 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3514 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3515 nvm_offset, &resp, &param, &buf_size,
3516 (u32 *)p_image_att);
3517 if (rc != ECORE_SUCCESS)
3520 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3521 (p_image_att->return_code != 1))
3522 rc = ECORE_UNKNOWN_ERROR;
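/* Illustrative sketch (not part of the driver flow): walking all NVM images
 * with the two BIST helpers above; only the iteration pattern is shown.
 */
#if 0
static void example_bist_nvm_walk(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	struct bist_nvm_image_att image_att;
	u32 i, num_images = 0;

	if (ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt,
						   &num_images) !=
	    ECORE_SUCCESS)
		return;

	for (i = 0; i < num_images; i++)
		if (ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
							  &image_att,
							  i) != ECORE_SUCCESS)
			break;
}
#endif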
3527 enum _ecore_status_t
3528 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3529 struct ecore_ptt *p_ptt,
3530 struct ecore_temperature_info *p_temp_info)
3532 struct ecore_temperature_sensor *p_temp_sensor;
3533 struct temperature_status_stc mfw_temp_info;
3534 struct ecore_mcp_mb_params mb_params;
3536 enum _ecore_status_t rc;
3539 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3540 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3541 mb_params.p_data_dst = &mfw_temp_info;
3542 mb_params.data_dst_size = sizeof(mfw_temp_info);
3543 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3544 if (rc != ECORE_SUCCESS)
3547 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3548 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3549 ECORE_MAX_NUM_OF_SENSORS);
3550 for (i = 0; i < p_temp_info->num_sensors; i++) {
3551 val = mfw_temp_info.sensor[i];
3552 p_temp_sensor = &p_temp_info->sensors[i];
3553 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3554 SENSOR_LOCATION_OFFSET;
3555 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3556 THRESHOLD_HIGH_OFFSET;
3557 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3558 CRITICAL_TEMPERATURE_OFFSET;
3559 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3560 CURRENT_TEMP_OFFSET;
3563 return ECORE_SUCCESS;
3566 enum _ecore_status_t ecore_mcp_get_mba_versions(
3567 struct ecore_hwfn *p_hwfn,
3568 struct ecore_ptt *p_ptt,
3569 struct ecore_mba_vers *p_mba_vers)
3571 u32 buf_size, resp, param;
3572 enum _ecore_status_t rc;
3574 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3575 0, &resp, &param, &buf_size,
3576 &p_mba_vers->mba_vers[0]);
3578 if (rc != ECORE_SUCCESS)
3581 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3582 rc = ECORE_UNKNOWN_ERROR;
3584 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3585 rc = ECORE_UNKNOWN_ERROR;
3590 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3591 struct ecore_ptt *p_ptt,
3596 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3597 0, &rsp, (u32 *)num_events);
3600 static enum resource_id_enum
3601 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3603 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3607 mfw_res_id = RESOURCE_NUM_SB_E;
3609 case ECORE_L2_QUEUE:
3610 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3613 mfw_res_id = RESOURCE_NUM_VPORT_E;
3616 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3619 mfw_res_id = RESOURCE_NUM_PQ_E;
3622 mfw_res_id = RESOURCE_NUM_RL_E;
3626 /* Each VFC resource can accommodate both a MAC and a VLAN */
3627 mfw_res_id = RESOURCE_VFC_FILTER_E;
3630 mfw_res_id = RESOURCE_ILT_E;
3632 case ECORE_LL2_QUEUE:
3633 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3635 case ECORE_RDMA_CNQ_RAM:
3636 case ECORE_CMDQS_CQS:
3637 /* CNQ/CMDQS are the same resource */
3638 mfw_res_id = RESOURCE_CQS_E;
3640 case ECORE_RDMA_STATS_QUEUE:
3641 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3644 mfw_res_id = RESOURCE_BDQ_E;
3653 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3654 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3655 #define ECORE_RESC_ALLOC_VERSION \
3656 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3657 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
3658 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3659 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3661 struct ecore_resc_alloc_in_params {
3663 enum ecore_resources res_id;
3667 struct ecore_resc_alloc_out_params {
3677 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
3679 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3681 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3682 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3683 enum _ecore_status_t rc;
3685 /* Allow ongoing PCIe transactions to complete */
3686 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3688 /* Clear the PF's internal FID_enable in the PXP */
3689 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3690 if (rc != ECORE_SUCCESS)
3691 DP_NOTICE(p_hwfn, false,
3692 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
3698 static enum _ecore_status_t
3699 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3700 struct ecore_ptt *p_ptt,
3701 struct ecore_resc_alloc_in_params *p_in_params,
3702 struct ecore_resc_alloc_out_params *p_out_params)
3704 struct ecore_mcp_mb_params mb_params;
3705 struct resource_info mfw_resc_info;
3706 enum _ecore_status_t rc;
3708 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3710 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3711 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3713 "Failed to match resource %d [%s] with the MFW resources\n",
3714 p_in_params->res_id,
3715 ecore_hw_get_resc_name(p_in_params->res_id));
3719 switch (p_in_params->cmd) {
3720 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3721 mfw_resc_info.size = p_in_params->resc_max_val;
3723 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3726 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3731 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3732 mb_params.cmd = p_in_params->cmd;
3733 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3734 mb_params.p_data_src = &mfw_resc_info;
3735 mb_params.data_src_size = sizeof(mfw_resc_info);
3736 mb_params.p_data_dst = mb_params.p_data_src;
3737 mb_params.data_dst_size = mb_params.data_src_size;
3739 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3740 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3741 p_in_params->cmd, p_in_params->res_id,
3742 ecore_hw_get_resc_name(p_in_params->res_id),
3743 GET_MFW_FIELD(mb_params.param,
3744 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3745 GET_MFW_FIELD(mb_params.param,
3746 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3747 p_in_params->resc_max_val);
3749 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3750 if (rc != ECORE_SUCCESS)
3753 p_out_params->mcp_resp = mb_params.mcp_resp;
3754 p_out_params->mcp_param = mb_params.mcp_param;
3755 p_out_params->resc_num = mfw_resc_info.size;
3756 p_out_params->resc_start = mfw_resc_info.offset;
3757 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3758 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3759 p_out_params->flags = mfw_resc_info.flags;
3761 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3762 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3763 GET_MFW_FIELD(p_out_params->mcp_param,
3764 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3765 GET_MFW_FIELD(p_out_params->mcp_param,
3766 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3767 p_out_params->resc_num, p_out_params->resc_start,
3768 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3769 p_out_params->flags);
3771 return ECORE_SUCCESS;
3774 enum _ecore_status_t
3775 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3776 enum ecore_resources res_id, u32 resc_max_val,
3779 struct ecore_resc_alloc_out_params out_params;
3780 struct ecore_resc_alloc_in_params in_params;
3781 enum _ecore_status_t rc;
3783 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3784 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3785 in_params.res_id = res_id;
3786 in_params.resc_max_val = resc_max_val;
3787 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3788 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3790 if (rc != ECORE_SUCCESS)
3793 *p_mcp_resp = out_params.mcp_resp;
3795 return ECORE_SUCCESS;
3798 enum _ecore_status_t
3799 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3800 enum ecore_resources res_id, u32 *p_mcp_resp,
3801 u32 *p_resc_num, u32 *p_resc_start)
3803 struct ecore_resc_alloc_out_params out_params;
3804 struct ecore_resc_alloc_in_params in_params;
3805 enum _ecore_status_t rc;
3807 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3808 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3809 in_params.res_id = res_id;
3810 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3811 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3813 if (rc != ECORE_SUCCESS)
3816 *p_mcp_resp = out_params.mcp_resp;
3818 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3819 *p_resc_num = out_params.resc_num;
3820 *p_resc_start = out_params.resc_start;
3823 return ECORE_SUCCESS;
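/* Illustrative sketch (not part of the driver flow): querying the MFW for
 * one resource's allocation; ECORE_VPORT is only an example id.
 */
#if 0
static void example_query_vports(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	u32 resp = 0, num = 0, start = 0;

	if (ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_VPORT, &resp,
				    &num, &start) == ECORE_SUCCESS &&
	    resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "VPORTs: num %d, start %d\n", num, start);
}
#endif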
3826 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3827 struct ecore_ptt *p_ptt)
3829 u32 mcp_resp, mcp_param;
3831 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3832 &mcp_resp, &mcp_param);
3835 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3836 struct ecore_ptt *p_ptt,
3837 u32 param, u32 *p_mcp_resp,
3840 enum _ecore_status_t rc;
3842 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3843 p_mcp_resp, p_mcp_param);
3844 if (rc != ECORE_SUCCESS)
3847 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3849 "The resource command is unsupported by the MFW\n");
3850 return ECORE_NOTIMPL;
3853 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3854 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3856 DP_NOTICE(p_hwfn, false,
3857 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3865 enum _ecore_status_t
3866 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3867 struct ecore_resc_lock_params *p_params)
3869 u32 param = 0, mcp_resp, mcp_param;
3871 enum _ecore_status_t rc;
3873 switch (p_params->timeout) {
3874 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3875 opcode = RESOURCE_OPCODE_REQ;
3876 p_params->timeout = 0;
3878 case ECORE_MCP_RESC_LOCK_TO_NONE:
3879 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3880 p_params->timeout = 0;
3883 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3887 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3888 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3889 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3891 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3892 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3893 param, p_params->timeout, opcode, p_params->resource);
3895 /* Attempt to acquire the resource */
3896 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3898 if (rc != ECORE_SUCCESS)
3901 /* Analyze the response */
3902 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3903 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3905 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3906 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3907 mcp_param, opcode, p_params->owner);
3910 case RESOURCE_OPCODE_GNT:
3911 p_params->b_granted = true;
3913 case RESOURCE_OPCODE_BUSY:
3914 p_params->b_granted = false;
3917 DP_NOTICE(p_hwfn, false,
3918 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3923 return ECORE_SUCCESS;
3926 enum _ecore_status_t
3927 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3928 struct ecore_resc_lock_params *p_params)
3931 enum _ecore_status_t rc;
3934 /* No need for an interval before the first iteration */
3936 if (p_params->sleep_b4_retry) {
3937 u16 retry_interval_in_ms =
3938 DIV_ROUND_UP(p_params->retry_interval,
3941 OSAL_MSLEEP(retry_interval_in_ms);
3943 OSAL_UDELAY(p_params->retry_interval);
3947 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3948 if (rc != ECORE_SUCCESS)
3951 if (p_params->b_granted)
3953 } while (retry_cnt++ < p_params->retry_num);
3955 return ECORE_SUCCESS;
3958 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
3959 struct ecore_resc_unlock_params *p_unlock,
3960 enum ecore_resc_lock resource,
3961 bool b_is_permanent)
3963 if (p_lock != OSAL_NULL) {
3964 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
3966 /* Permanent resources don't require aging, and there's no
3967 * point in trying to acquire them more than once, since no
3968 * other entity is expected to release them.
3970 if (b_is_permanent) {
3971 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
3973 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3974 p_lock->retry_interval =
3975 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3976 p_lock->sleep_b4_retry = true;
3979 p_lock->resource = resource;
3982 if (p_unlock != OSAL_NULL) {
3983 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
3984 p_unlock->resource = resource;
3988 enum _ecore_status_t
3989 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3990 struct ecore_resc_unlock_params *p_params)
3992 u32 param = 0, mcp_resp, mcp_param;
3994 enum _ecore_status_t rc;
3996 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3997 : RESOURCE_OPCODE_RELEASE;
3998 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3999 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
4001 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4002 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
4003 param, opcode, p_params->resource);
4005 /* Attempt to release the resource */
4006 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
4008 if (rc != ECORE_SUCCESS)
4011 /* Analyze the response */
4012 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
4014 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4015 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
4019 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
4021 "Resource unlock request for an already released resource [%d]\n",
4022 p_params->resource);
4024 case RESOURCE_OPCODE_RELEASED:
4025 p_params->b_released = true;
4027 case RESOURCE_OPCODE_WRONG_OWNER:
4028 p_params->b_released = false;
4031 DP_NOTICE(p_hwfn, false,
4032 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
4037 return ECORE_SUCCESS;
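/* Illustrative sketch (not part of the driver flow): the intended pairing of
 * the resource-lock helpers - initialize defaults, acquire, work, release.
 * The resource id and the ECORE_BUSY policy are assumptions for the example.
 */
#if 0
static enum _ecore_status_t example_locked_op(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      enum ecore_resc_lock resource)
{
	struct ecore_resc_unlock_params unlock_params;
	struct ecore_resc_lock_params lock_params;

	ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
					 resource, false);

	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) !=
	    ECORE_SUCCESS || !lock_params.b_granted)
		return ECORE_BUSY;

	/* ... work requiring inter-function exclusion ... */

	return ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}
#endif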
4040 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
4042 return !!(p_hwfn->mcp_info->capabilities &
4043 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
4046 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4047 struct ecore_ptt *p_ptt)
4050 enum _ecore_status_t rc;
4052 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4053 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4054 if (rc == ECORE_SUCCESS)
4055 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4056 "MFW supported features: %08x\n",
4057 p_hwfn->mcp_info->capabilities);
4062 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4063 struct ecore_ptt *p_ptt)
4065 u32 mcp_resp, mcp_param, features;
4067 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4068 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
4069 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
4071 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4072 features, &mcp_resp, &mcp_param);
enum _ecore_status_t
ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			struct ecore_mcp_drv_attr *p_drv_attr)
{
	struct attribute_cmd_write_stc attr_cmd_write;
	enum _attribute_commands_e mfw_attr_cmd;
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	switch (p_drv_attr->attr_cmd) {
	case ECORE_MCP_DRV_ATTR_CMD_READ:
		mfw_attr_cmd = ATTRIBUTE_CMD_READ;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_WRITE:
		mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
		mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
		break;
	case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
		mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
			  p_drv_attr->attr_cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
		      p_drv_attr->attr_num);
	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
		      mfw_attr_cmd);
	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
		attr_cmd_write.val = p_drv_attr->val;
		attr_cmd_write.mask = p_drv_attr->mask;
		attr_cmd_write.offset = p_drv_attr->offset;

		mb_params.p_data_src = &attr_cmd_write;
		mb_params.data_src_size = sizeof(attr_cmd_write);
	}

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The attribute command is not supported by the MFW\n");
		return ECORE_NOTIMPL;
	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
			mb_params.mcp_resp, p_drv_attr->attr_cmd,
			p_drv_attr->attr_num);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
		   mb_params.mcp_param);

	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
		p_drv_attr->val = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
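
/* Illustrative sketch (assumption): reading an attribute and printing the
 * value returned by the MFW. The attribute number 0 is hypothetical; the
 * struct fields follow `struct ecore_mcp_drv_attr' as used above.
 *
 *	struct ecore_mcp_drv_attr drv_attr;
 *
 *	OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
 *	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
 *	drv_attr.attr_num = 0;
 *	if (ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr) ==
 *	    ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "attr[0] = 0x%08x\n",
 *			   drv_attr.val);
 */
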
enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_mcp_mb_params mb_params;
	u8 fir_valid, l2_valid;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_engine_config command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
				  FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
	if (fir_valid)
		p_dev->fir_affin =
			GET_MFW_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);

	l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
				 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
	if (l2_valid)
		p_dev->l2_affin_hint =
			GET_MFW_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);

	DP_INFO(p_hwfn,
		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
		fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);

	return ECORE_SUCCESS;
}
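
/* Illustrative sketch (assumption): probe-time code could cache the engine
 * affinity hints once per device, tolerating an older MFW that does not
 * implement the command; treating ECORE_NOTIMPL as non-fatal is our own
 * suggestion.
 *
 *	rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt);
 *	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
 *		return rc;
 */
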
enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_ppfid_bitmap command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
					    FW_MB_PARAM_PPFID_BITMAP);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
		   p_dev->ppfid_bitmap);

	return ECORE_SUCCESS;
}
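
/* Illustrative sketch (assumption): once the bitmap is cached, callers can
 * walk the available PPFIDs; the 8-bit walk below matches the %hhx print
 * above but is our own example, not driver code.
 *
 *	u8 ppfid;
 *
 *	for (ppfid = 0; ppfid < 8; ppfid++)
 *		if (p_hwfn->p_dev->ppfid_bitmap & (1 << ppfid))
 *			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 *				   "PPFID %hhu is available\n", ppfid);
 */
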
void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      u32 offset, u32 val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 dword = val;
	struct ecore_mcp_mb_params mb_params;

	OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
	mb_params.param = offset;
	mb_params.p_data_src = &dword;
	mb_params.data_src_size = sizeof(dword);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "WOL write request failed, rc = %d\n", rc);
	}

	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
			  val, offset, mb_params.mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;