* Copyright (c) 2016 QLogic Corporation.
* See LICENSE.qede_pmd for copyright and licensing details.
#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"
#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
OFFSETOF(struct public_drv_mb, _field), _val)
#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
OFFSETOF(struct public_drv_mb, _field))
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
DRV_ID_PDA_COMP_VER_OFFSET)
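/* A minimal usage sketch of the mailbox accessor macros above, mirroring the
 * reset and offset-load paths later in this file:
 *
 *     DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
 *     seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK;
 */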
#define MCP_BYTES_PER_MBIT_OFFSET 17
static int loaded_port[MAX_NUM_PORTS] = { 0 };
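/* The MCP interface is considered usable only if mcp_info was allocated and a
 * non-zero shmem public_base was discovered during ecore_mcp_cmd_init().
 */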
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"port_addr = 0x%x, port_id 0x%02x\n",
p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
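/* Copy the MFW-to-driver message dwords from shmem into mfw_mb_cur,
 * converting each dword from big-endian as it is read.
 */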
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
if (!p_hwfn->mcp_info->public_base)
for (i = 0; i < length; i++) {
tmp = ecore_rd(p_hwfn, p_ptt,
p_hwfn->mcp_info->mfw_mb_addr +
(i << 2) + sizeof(u32));
((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
OSAL_BE32_TO_CPU(tmp);
struct ecore_mcp_cmd_elem {
osal_list_entry_t list;
struct ecore_mcp_mb_params *p_mb_params;
u16 expected_seq_num;
/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_mb_params *p_mb_params,
u16 expected_seq_num)
struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
sizeof(*p_cmd_elem));
DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_mcp_cmd_elem'\n");
p_cmd_elem->p_mb_params = p_mb_params;
p_cmd_elem->expected_seq_num = expected_seq_num;
OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_cmd_elem *p_cmd_elem)
OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
struct ecore_mcp_cmd_elem) {
if (p_cmd_elem->expected_seq_num == seq_num)
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
if (p_hwfn->mcp_info) {
struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
&p_hwfn->mcp_info->cmd_list, list,
struct ecore_mcp_cmd_elem) {
ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
return ECORE_SUCCESS;
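/* Read the shmem offsets required for MCP communication: the driver and MFW
 * mailbox addresses, the initial mailbox/pulse sequence numbers and the MCP
 * history counter.
 */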
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
p_info->public_base = 0;
p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
if (!p_info->public_base)
p_info->public_base |= GRCBASE_MCP;
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
209 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
210 " mcp_pf_id = 0x%x\n",
211 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
213 /* Set the MFW MB address */
214 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
215 SECTION_OFFSIZE_ADDR(p_info->public_base,
217 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
218 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
219 p_info->mfw_mb_addr);
221 /* Get the current driver mailbox sequence before sending
224 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
225 DRV_MSG_SEQ_NUMBER_MASK;
227 /* Get current FW pulse sequence */
228 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
231 p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
233 return ECORE_SUCCESS;
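/* Allocate mcp_info, initialize its spinlocks and command list, and load the
 * shmem offsets. A missing MFW is not fatal here - public_base is left clear
 * as the indication that the MCP is not initialized.
 */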
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
struct ecore_mcp_info *p_info;
/* Allocate mcp_info structure */
p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_hwfn->mcp_info));
if (!p_hwfn->mcp_info) {
DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
p_info = p_hwfn->mcp_info;
/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
OSAL_LIST_INIT(&p_info->cmd_list);
if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
/* Do not free mcp_info here, since public_base indicates that
* the MCP is not initialized
*/
return ECORE_SUCCESS;
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
return ECORE_SUCCESS;
DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
ecore_mcp_free(p_hwfn);
static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
/* Use MCP history register to check if MCP reset occurred between init
if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
p_hwfn->mcp_info->mcp_hist, generic_por_0);
ecore_load_mcp_offsets(p_hwfn, p_ptt);
ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
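/* Issue an MCP_RESET mailbox command and poll MISCS_REG_GENERIC_POR_0 until
 * the MFW acknowledges the reset or the retry budget is exhausted.
 */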
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
delay = EMUL_MCP_RESP_ITER_US;
if (p_hwfn->mcp_info->b_block_cmd) {
DP_NOTICE(p_hwfn, false,
"The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
return ECORE_ABORTED;
/* Ensure that only a single thread is accessing the mailbox */
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
/* Set drv command along with the updated sequence */
ecore_mcp_reread_offsets(p_hwfn, p_ptt);
seq = ++p_hwfn->mcp_info->drv_mb_seq;
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
/* Wait for MFW response */
/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
MISCS_REG_GENERIC_POR_0)) &&
(cnt++ < ECORE_MCP_RESET_RETRIES));
if (org_mcp_reset_seq !=
ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"MCP was reset after %d usec\n", cnt * delay);
DP_ERR(p_hwfn, "Failed to reset MCP\n");
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
/* There is at most one pending command at a certain time, and if it
* exists - it is placed at the HEAD of the list.
if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
struct ecore_mcp_cmd_elem,
return !p_cmd_elem->b_is_completed;
/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
struct ecore_mcp_mb_params *p_mb_params;
struct ecore_mcp_cmd_elem *p_cmd_elem;
mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
/* Return if no new non-handled response has been received */
if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
"Failed to find a pending mailbox cmd that expects sequence number %d\n",
return ECORE_UNKNOWN_ERROR;
p_mb_params = p_cmd_elem->p_mb_params;
/* Get the MFW response along with the sequence number */
p_mb_params->mcp_resp = mcp_resp;
/* Get the MFW param */
p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
/* Get the union data */
if (p_mb_params->p_data_dst != OSAL_NULL &&
p_mb_params->data_dst_size) {
u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
OFFSETOF(struct public_drv_mb,
ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
union_data_addr, p_mb_params->data_dst_size);
p_cmd_elem->b_is_completed = true;
return ECORE_SUCCESS;
/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params,
union drv_union_data union_data;
/* Set the union data */
union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
OFFSETOF(struct public_drv_mb, union_data);
OSAL_MEM_ZERO(&union_data, sizeof(union_data));
if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
p_mb_params->data_src_size);
ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
/* Set the drv param */
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
/* Set the drv command along with the sequence number */
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"MFW mailbox: command 0x%08x param 0x%08x\n",
(p_mb_params->cmd | seq_num), p_mb_params->param);
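/* Block or unblock further mailbox commands, used once the MFW is considered
 * unresponsive.
 */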
static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
p_hwfn->mcp_info->b_block_cmd = block_cmd;
DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
block_cmd ? "Block" : "Unblock");
void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
DP_NOTICE(p_hwfn, false,
"MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
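/* Send a mailbox command and wait for the MFW response in two phases: first
 * wait for the mailbox to become free, then wait for this command's
 * completion. cmd_lock protects both the command list and the mailbox.
 */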
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params,
u32 max_retries, u32 delay)
struct ecore_mcp_cmd_elem *p_cmd_elem;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Wait until the mailbox is non-occupied */
/* Exit the loop if there is no pending command, or if the
* pending command is completed during this iteration.
* The spinlock stays locked until the command is sent.
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
if (!ecore_mcp_has_pending_cmd(p_hwfn))
rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
if (rc == ECORE_SUCCESS)
else if (rc != ECORE_AGAIN)
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
OSAL_MFW_CMD_PREEMPT(p_hwfn);
} while (++cnt < max_retries);
if (cnt >= max_retries) {
DP_NOTICE(p_hwfn, false,
"The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
/* Send the mailbox command */
ecore_mcp_reread_offsets(p_hwfn, p_ptt);
seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
/* Wait for the MFW response */
/* Exit the loop if the command is already completed, or if the
* command is completed during this iteration.
* The spinlock stays locked until the list element is removed.
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
if (p_cmd_elem->b_is_completed)
rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
if (rc == ECORE_SUCCESS)
else if (rc != ECORE_AGAIN)
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
OSAL_MFW_CMD_PREEMPT(p_hwfn);
} while (++cnt < max_retries);
if (cnt >= max_retries) {
DP_NOTICE(p_hwfn, false,
"The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
ecore_mcp_cmd_set_blocking(p_hwfn, true);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
p_mb_params->mcp_resp, p_mb_params->mcp_param,
(cnt * delay) / 1000, (cnt * delay) % 1000);
/* Clear the sequence number from the MFW response */
p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
return ECORE_SUCCESS;
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params)
osal_size_t union_data_size = sizeof(union drv_union_data);
u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
u32 delay = CHIP_MCP_RESP_ITER_US;
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
delay = EMUL_MCP_RESP_ITER_US;
/* There is a built-in delay of 100usec in each MFW response read */
if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
/* MCP not initialized */
if (!ecore_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
if (p_mb_params->data_src_size > union_data_size ||
p_mb_params->data_dst_size > union_data_size) {
"The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
p_mb_params->data_src_size, p_mb_params->data_dst_size,
if (p_hwfn->mcp_info->b_block_cmd) {
DP_NOTICE(p_hwfn, false,
"The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
return ECORE_ABORTED;
return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
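/* Simple command/response wrapper around ecore_mcp_cmd_and_union() for
 * mailbox commands that carry no union data.
 */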
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 cmd, u32 param,
u32 *o_mcp_resp, u32 *o_mcp_param)
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
loaded_port[p_hwfn->port_id]--;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
return ECORE_SUCCESS;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.param = param;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
*o_mcp_resp = mb_params.mcp_resp;
*o_mcp_param = mb_params.mcp_param;
return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 i_txn_size, u32 *i_buf)
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.param = param;
mb_params.p_data_src = i_buf;
mb_params.data_src_size = (u8)i_txn_size;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
*o_mcp_resp = mb_params.mcp_resp;
*o_mcp_param = mb_params.mcp_param;
return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *o_txn_size, u32 *o_buf)
struct ecore_mcp_mb_params mb_params;
u8 raw_data[MCP_DRV_NVM_BUF_LEN];
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.param = param;
mb_params.p_data_dst = raw_data;
/* Use the maximal value since the actual one is part of the response */
mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
*o_mcp_resp = mb_params.mcp_resp;
*o_mcp_param = mb_params.mcp_param;
*o_txn_size = *o_mcp_param;
OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
return ECORE_SUCCESS;
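/* Emulation-only workaround: derive the load phase locally (engine/port/
 * function) instead of consulting the absent MFW.
 */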
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
else if (!loaded_port[p_hwfn->port_id])
load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
/* On CMT, always tell that it's engine */
if (ECORE_IS_CMT(p_hwfn->p_dev))
load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
*p_load_code = load_phase;
loaded_port[p_hwfn->port_id]++;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
*p_load_code, loaded, p_hwfn->port_id,
loaded_port[p_hwfn->port_id]);
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
enum ecore_override_force_load override_force_load)
bool can_force_load = false;
switch (override_force_load) {
case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
can_force_load = true;
case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
can_force_load = false;
can_force_load = (drv_role == DRV_ROLE_OS &&
exist_drv_role == DRV_ROLE_PREBOOT) ||
(drv_role == DRV_ROLE_KDUMP &&
exist_drv_role == DRV_ROLE_OS);
return can_force_load;
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 resp = 0, param = 0;
enum _ecore_status_t rc;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false,
"Failed to send cancel load request, rc = %d\n", rc);
#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
static u32 ecore_get_config_bitmap(void)
u32 config_bitmap = 0x0;
#ifdef CONFIG_ECORE_L2
config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#ifdef CONFIG_ECORE_SRIOV
config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#ifdef CONFIG_ECORE_ROCE
config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#ifdef CONFIG_ECORE_IWARP
config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#ifdef CONFIG_ECORE_FCOE
config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#ifdef CONFIG_ECORE_ISCSI
config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#ifdef CONFIG_ECORE_LL2
config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
return config_bitmap;
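/* Parameters exchanged with the MFW as part of the LOAD_REQ negotiation. */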
struct ecore_load_req_in_params {
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
#define ECORE_LOAD_REQ_HSI_VER_1 1
bool avoid_eng_reset;
struct ecore_load_req_out_params {
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_load_req_in_params *p_in_params,
struct ecore_load_req_out_params *p_out_params)
struct ecore_mcp_mb_params mb_params;
struct load_req_stc load_req;
struct load_rsp_stc load_rsp;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&load_req, sizeof(load_req));
load_req.drv_ver_0 = p_in_params->drv_ver_0;
load_req.drv_ver_1 = p_in_params->drv_ver_1;
load_req.fw_ver = p_in_params->fw_ver;
SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
p_in_params->timeout_val);
SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
p_in_params->avoid_eng_reset);
hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
DRV_ID_MCP_HSI_VER_CURRENT :
(p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
mb_params.p_data_src = &load_req;
mb_params.data_src_size = sizeof(load_req);
mb_params.p_data_dst = &load_rsp;
mb_params.data_dst_size = sizeof(load_rsp);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
load_req.drv_ver_0, load_req.drv_ver_1,
load_req.fw_ver, load_req.misc0,
GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Failed to send load request, rc = %d\n", rc);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Response: resp 0x%08x\n", mb_params.mcp_resp);
p_out_params->load_code = mb_params.mcp_resp;
if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
load_rsp.drv_ver_0, load_rsp.drv_ver_1,
load_rsp.fw_ver, load_rsp.misc0,
GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
p_out_params->exist_fw_ver = load_rsp.fw_ver;
p_out_params->exist_drv_role =
GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
p_out_params->mfw_hsi_ver =
GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
p_out_params->drv_exists =
GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
LOAD_RSP_FLAGS0_DRV_EXISTS;
return ECORE_SUCCESS;
static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
case ECORE_DRV_ROLE_OS:
*p_mfw_drv_role = DRV_ROLE_OS;
case ECORE_DRV_ROLE_KDUMP:
*p_mfw_drv_role = DRV_ROLE_KDUMP;
enum ecore_load_req_force {
ECORE_LOAD_REQ_FORCE_NONE,
ECORE_LOAD_REQ_FORCE_PF,
ECORE_LOAD_REQ_FORCE_ALL,
static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
case ECORE_LOAD_REQ_FORCE_NONE:
*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
case ECORE_LOAD_REQ_FORCE_PF:
*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
case ECORE_LOAD_REQ_FORCE_ALL:
*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
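/* Negotiate the load phase with the MFW, falling back to HSI version 1 or to
 * a force load when the MFW refuses the initial request.
 */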
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_load_req_params *p_params)
struct ecore_load_req_out_params out_params;
struct ecore_load_req_in_params in_params;
u8 mfw_drv_role = 0, mfw_force_cmd;
enum _ecore_status_t rc;
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
return ECORE_SUCCESS;
OSAL_MEM_ZERO(&in_params, sizeof(in_params));
in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
in_params.drv_ver_0 = ECORE_VERSION;
in_params.drv_ver_1 = ecore_get_config_bitmap();
in_params.fw_ver = STORM_FW_VERSION;
ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
in_params.drv_role = mfw_drv_role;
in_params.timeout_val = p_params->timeout_val;
ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
in_params.avoid_eng_reset = p_params->avoid_eng_reset;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
if (rc != ECORE_SUCCESS)
/* First handle cases where another load request should/might be sent:
* - MFW expects the old interface [HSI version = 1]
* - MFW responds that a force load request is required
if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
if (rc != ECORE_SUCCESS)
} else if (out_params.load_code ==
FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
if (ecore_mcp_can_force_load(in_params.drv_role,
out_params.exist_drv_role,
p_params->override_force_load)) {
"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
in_params.drv_role, in_params.fw_ver,
in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
in_params.force_cmd = mfw_force_cmd;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false,
1044 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
in_params.drv_role, in_params.fw_ver,
in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
/* Now handle the other types of responses.
* The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
* expected here after the additional revised load requests were sent.
switch (out_params.load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
case FW_MSG_CODE_DRV_LOAD_PORT:
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
out_params.drv_exists) {
/* The role and fw/driver version match, but the PF is
* already loaded and has not been unloaded gracefully.
* This is unexpected since a quasi-FLR request was
* previously sent as part of ecore_hw_prepare().
DP_NOTICE(p_hwfn, false,
"PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
DP_NOTICE(p_hwfn, false,
"Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
out_params.load_code);
p_params->load_code = out_params.load_code;
return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 resp = 0, param = 0;
enum _ecore_status_t rc;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Failed to send a LOAD_DONE command, rc = %d\n", rc);
/* Check if there is a DID mismatch between nvm-cfg/efuse */
if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
DP_NOTICE(p_hwfn, false,
"warning: device configuration is not supported on this board type. The device may not function as expected.\n");
return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 wol_param, mcp_resp, mcp_param;
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
&mcp_resp, &mcp_param);
enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
struct ecore_mcp_mb_params mb_params;
struct mcp_mac wol_mac;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
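/* Read the disabled-VF bitmap from the path section in shmem and let the IOV
 * code mark the corresponding VFs as FLR-ed.
 */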
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
u32 path_addr = SECTION_ADDR(mfw_path_offsize,
ECORE_PATH_ID(p_hwfn));
u32 disabled_vfs[VF_MAX_STATIC / 32];
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Reading Disabled VF information from [offset %08x],"
" path_addr %08x\n",
mfw_path_offsize, path_addr);
for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
OFFSETOF(struct public_path,
DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
"FLR-ed VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
OSAL_VF_FLR_UPDATE(p_hwfn);
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
"Acking VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
mb_params.p_data_src = vfs_to_ack;
mb_params.data_src_size = VF_MAX_STATIC / 8;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Failed to pass ACK for VF flr to MFW\n");
return ECORE_TIMEOUT;
/* TMP - clear the ACK bits; should be done by MFW */
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
ecore_wr(p_hwfn, p_ptt,
OFFSETOF(struct public_func, drv_ack_vf_disabled) +
i * sizeof(u32), 0);
static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 transceiver_state;
transceiver_state = ecore_rd(p_hwfn, p_ptt,
p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port,
DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
"Received transceiver state update [0x%08x] from mfw"
transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port,
transceiver_data)));
transceiver_state = GET_MFW_FIELD(transceiver_state,
ETH_TRANSCEIVER_STATE);
if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
OSAL_TRANSCEIVER_UPDATE(p_hwfn);
static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_link_state *p_link)
u32 eee_status, val;
p_link->eee_adv_caps = 0;
p_link->eee_lp_adv_caps = 0;
eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port, eee_status));
p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
if (val & EEE_1G_ADV)
p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
if (val & EEE_10G_ADV)
p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
if (val & EEE_1G_ADV)
p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
if (val & EEE_10G_ADV)
p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct public_func *p_data,
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
OSAL_MEM_ZERO(p_data, sizeof(*p_data));
size = OSAL_MIN_T(u32, sizeof(*p_data),
SECTION_SIZE(mfw_path_offsize));
for (i = 0; i < size / sizeof(u32); i++)
((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
func_addr + (i << 2));
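/* Process a link-change notification: read link_status from the port section,
 * derive speed, duplex, flow control and partner abilities, and re-apply the
 * min/max bandwidth configuration.
 */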
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_link_state *p_link;
/* Prevent SW/attentions from doing this at the same time */
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
p_link = &p_hwfn->mcp_info->link_output;
OSAL_MEMSET(p_link, 0, sizeof(*p_link));
status = ecore_rd(p_hwfn, p_ptt,
p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port, link_status));
DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
"Received link update [0x%08x] from mfw"
status, (u32)(p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port,
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Resetting link indications\n");
if (p_hwfn->b_drv_link_init) {
/* Link indication with modern MFW arrives as per-PF
if (p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
struct public_func shmem_info;
ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
p_link->link_up = !!(shmem_info.status &
FUNC_STATUS_VIRTUAL_LINK_UP);
p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
p_link->link_up = false;
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
case LINK_STATUS_SPEED_AND_DUPLEX_100G:
p_link->speed = 100000;
case LINK_STATUS_SPEED_AND_DUPLEX_50G:
p_link->speed = 50000;
case LINK_STATUS_SPEED_AND_DUPLEX_40G:
p_link->speed = 40000;
case LINK_STATUS_SPEED_AND_DUPLEX_25G:
p_link->speed = 25000;
case LINK_STATUS_SPEED_AND_DUPLEX_20G:
p_link->speed = 20000;
case LINK_STATUS_SPEED_AND_DUPLEX_10G:
p_link->speed = 10000;
case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
p_link->full_duplex = false;
case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
p_link->speed = 1000;
/* We never store the total line speed here, as p_link->speed is
* changed again according to bandwidth allocation.
*/
if (p_link->link_up && p_link->speed)
p_link->line_speed = p_link->speed;
p_link->line_speed = 0;
max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
/* Max bandwidth configuration */
__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
/* Min bandwidth configuration */
__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
p_link->parallel_detection = !!(status &
LINK_STATUS_PARALLEL_DETECTION_USED);
p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
ECORE_LINK_PARTNER_SPEED_10G : 0;
p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
ECORE_LINK_PARTNER_SPEED_20G : 0;
p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
ECORE_LINK_PARTNER_SPEED_25G : 0;
p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
ECORE_LINK_PARTNER_SPEED_40G : 0;
p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
ECORE_LINK_PARTNER_SPEED_50G : 0;
p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
ECORE_LINK_PARTNER_SPEED_100G : 0;
p_link->partner_tx_flow_ctrl_en =
!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
p_link->partner_rx_flow_ctrl_en =
!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
p_link->partner_adv_pause = 0;
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
OSAL_LINK_UPDATE(p_hwfn);
OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, bool b_up)
struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct ecore_mcp_mb_params mb_params;
struct eth_phy_cfg phy_cfg;
enum _ecore_status_t rc = ECORE_SUCCESS;
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
return ECORE_SUCCESS;
/* Set the shmem configuration according to params */
OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
phy_cfg.speed = params->speed.forced_speed;
phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg.adv_speed = params->speed.advertised_speeds;
phy_cfg.loopback_mode = params->loopback_mode;
/* There are MFWs that share this capability regardless of whether
* this is feasible or not. And given that at the very least adv_caps
* would be set internally by ecore, we want to make sure LFA would
if ((p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
params->eee.enable) {
phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
if (params->eee.tx_lpi_enable)
phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
EEE_TX_TIMER_USEC_OFFSET) &
EEE_TX_TIMER_USEC_MASK;
p_hwfn->b_drv_link_init = b_up;
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
phy_cfg.loopback_mode);
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.p_data_src = &phy_cfg;
mb_params.data_src_size = sizeof(phy_cfg);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
/* Mimic link-change attention, done for several reasons:
* - On reset, there's no guarantee MFW would trigger
* - On initialization, older MFWs might not indicate link change
* during LFA, so we'll never get an UP indication.
ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
return ECORE_SUCCESS;
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
/* TODO - Add support for VFs */
if (IS_VF(p_hwfn->p_dev))
path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
OFFSETOF(struct public_path, process_kill)) &
PROCESS_KILL_COUNTER_MASK;
return proc_kill_cnt;
static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
struct ecore_dev *p_dev = p_hwfn->p_dev;
/* Prevent possible attentions/interrupts during the recovery handling
* and till its load phase, during which they will be re-enabled.
ecore_int_igu_disable_int(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
/* The following operations should be done once, and thus in CMT mode
* are carried out by only the first HW function.
if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
if (p_dev->recov_in_prog) {
DP_NOTICE(p_hwfn, false,
"Ignoring the indication since a recovery"
" process is already in progress\n");
p_dev->recov_in_prog = true;
proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
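/* Collect the protocol statistics requested by the MFW and return them via
 * the GET_STATS mailbox command.
 */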
static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum MFW_DRV_MSG_TYPE type)
enum ecore_mcp_protocol_type stats_type;
union ecore_mcp_protocol_stats stats;
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
case MFW_DRV_MSG_GET_LAN_STATS:
stats_type = ECORE_MCP_LAN_STATS;
hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Invalid protocol type %d\n", type);
OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_STATS;
mb_params.param = hsi_param;
mb_params.p_data_src = &stats;
mb_params.data_src_size = sizeof(stats);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
struct public_func *p_shmem_info)
struct ecore_mcp_function_info *p_info;
p_info = &p_hwfn->mcp_info->func_info;
/* TODO - bandwidth min/max should have valid values of 1-100,
* as well as some indication that the feature is disabled.
* Until MFW/qlediag enforce those limitations, assume there is always a
* limit, and clamp the value to min `1' and max `100' if it isn't in range.
*/
1618 p_info->bandwidth_min = (p_shmem_info->config &
1619 FUNC_MF_CFG_MIN_BW_MASK) >>
1620 FUNC_MF_CFG_MIN_BW_OFFSET;
1621 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1623 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1624 p_info->bandwidth_min);
1625 p_info->bandwidth_min = 1;
1628 p_info->bandwidth_max = (p_shmem_info->config &
1629 FUNC_MF_CFG_MAX_BW_MASK) >>
1630 FUNC_MF_CFG_MAX_BW_OFFSET;
1631 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1633 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1634 p_info->bandwidth_max);
1635 p_info->bandwidth_max = 100;
1640 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1642 struct ecore_mcp_function_info *p_info;
1643 struct public_func shmem_info;
1644 u32 resp = 0, param = 0;
1646 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1648 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1650 p_info = &p_hwfn->mcp_info->func_info;
1652 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1654 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1656 /* Acknowledge the MFW */
1657 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1661 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
1663 /* A single notification should be sent to upper driver in CMT mode */
1664 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1667 DP_NOTICE(p_hwfn, false,
1668 "Fan failure was detected on the network interface card"
1669 " and it's going to be shut down.\n");
1671 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1674 struct ecore_mdump_cmd_params {
1683 static enum _ecore_status_t
1684 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1685 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1687 struct ecore_mcp_mb_params mb_params;
1688 enum _ecore_status_t rc;
1690 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1691 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1692 mb_params.param = p_mdump_cmd_params->cmd;
1693 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1694 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1695 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1696 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1697 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1698 if (rc != ECORE_SUCCESS)
1701 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1703 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1705 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1706 p_mdump_cmd_params->cmd);
1708 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1710 "The mdump command is not supported by the MFW\n");
1717 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1718 struct ecore_ptt *p_ptt)
1720 struct ecore_mdump_cmd_params mdump_cmd_params;
1722 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1723 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1725 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1728 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1729 struct ecore_ptt *p_ptt,
1732 struct ecore_mdump_cmd_params mdump_cmd_params;
1734 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1735 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1736 mdump_cmd_params.p_data_src = &epoch;
1737 mdump_cmd_params.data_src_size = sizeof(epoch);
1739 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1742 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1743 struct ecore_ptt *p_ptt)
1745 struct ecore_mdump_cmd_params mdump_cmd_params;
1747 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1748 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1750 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1753 static enum _ecore_status_t
1754 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1755 struct mdump_config_stc *p_mdump_config)
1757 struct ecore_mdump_cmd_params mdump_cmd_params;
1758 enum _ecore_status_t rc;
1760 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1761 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1762 mdump_cmd_params.p_data_dst = p_mdump_config;
1763 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1765 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1766 if (rc != ECORE_SUCCESS)
1769 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1771 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1772 mdump_cmd_params.mcp_resp);
1773 rc = ECORE_UNKNOWN_ERROR;
1779 enum _ecore_status_t
1780 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1781 struct ecore_mdump_info *p_mdump_info)
1783 u32 addr, global_offsize, global_addr;
1784 struct mdump_config_stc mdump_config;
1785 enum _ecore_status_t rc;
1787 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1789 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1791 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1792 global_addr = SECTION_ADDR(global_offsize, 0);
1793 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1795 OFFSETOF(struct public_global,
1798 if (p_mdump_info->reason) {
1799 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1800 if (rc != ECORE_SUCCESS)
1803 p_mdump_info->version = mdump_config.version;
1804 p_mdump_info->config = mdump_config.config;
1805 p_mdump_info->epoch = mdump_config.epoc;
1806 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1807 p_mdump_info->valid_logs = mdump_config.valid_logs;
1809 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1810 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1811 p_mdump_info->reason, p_mdump_info->version,
1812 p_mdump_info->config, p_mdump_info->epoch,
1813 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1815 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1816 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1819 return ECORE_SUCCESS;
1822 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1823 struct ecore_ptt *p_ptt)
1825 struct ecore_mdump_cmd_params mdump_cmd_params;
1827 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1828 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1830 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1833 enum _ecore_status_t
1834 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1835 struct ecore_mdump_retain_data *p_mdump_retain)
1837 struct ecore_mdump_cmd_params mdump_cmd_params;
1838 struct mdump_retain_data_stc mfw_mdump_retain;
1839 enum _ecore_status_t rc;
1841 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1842 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1843 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1844 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1846 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1847 if (rc != ECORE_SUCCESS)
1850 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1852 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1853 mdump_cmd_params.mcp_resp);
1854 return ECORE_UNKNOWN_ERROR;
1857 p_mdump_retain->valid = mfw_mdump_retain.valid;
1858 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1859 p_mdump_retain->pf = mfw_mdump_retain.pf;
1860 p_mdump_retain->status = mfw_mdump_retain.status;
1862 return ECORE_SUCCESS;
1865 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1866 struct ecore_ptt *p_ptt)
1868 struct ecore_mdump_cmd_params mdump_cmd_params;
1870 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1871 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1873 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1876 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1877 struct ecore_ptt *p_ptt)
1879 struct ecore_mdump_retain_data mdump_retain;
1880 enum _ecore_status_t rc;
1882 /* In CMT mode - no need for more than a single acknowledgment to the
1883 * MFW, and no more than a single notification to the upper driver.
1885 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1888 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1889 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1890 DP_NOTICE(p_hwfn, false,
1891 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1892 mdump_retain.epoch, mdump_retain.pf,
1893 mdump_retain.status);
1895 DP_NOTICE(p_hwfn, false,
1896 "The MFW notified that a critical error occurred in the device\n");
1899 if (p_hwfn->p_dev->allow_mdump) {
1900 DP_NOTICE(p_hwfn, false,
1901 "Not acknowledging the notification to allow the MFW crash dump\n");
1905 DP_NOTICE(p_hwfn, false,
1906 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1907 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1908 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1912 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1914 struct public_func shmem_info;
1917 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
1920 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1921 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1922 OFFSETOF(struct public_port, oem_cfg_port));
1923 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
1924 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1925 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
1928 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
1929 if (val == OEM_CFG_SCHED_TYPE_ETS)
1930 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
1931 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
1932 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
1934 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
1937 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1939 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
1940 p_hwfn->ufp_info.tc = (u8)val;
1941 val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
1942 OEM_CFG_FUNC_HOST_PRI_CTRL);
1943 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
1944 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
1945 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
1946 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
1948 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
1951 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
1952 "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
1953 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1954 p_hwfn->ufp_info.pri_type);
1957 static enum _ecore_status_t
1958 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1960 ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
1962 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
1963 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1964 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
1966 ecore_qm_reconf(p_hwfn, p_ptt);
1968 /* Merge UFP TC with the dcbx TC data */
1969 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1970 ECORE_DCBX_OPERATIONAL_MIB);
1973 /* update storm FW with negotiation results */
1974 ecore_sp_pf_update_ufp(p_hwfn);
1976 return ECORE_SUCCESS;
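/* The event handler below reads the MFW mailbox into mfw_mb_cur, compares
 * each message against the shadow copy, dispatches a handler for every
 * command that changed, acks all messages back to the MFW (in BE) and
 * finally updates the shadow copy.
 */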
1979 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1980 struct ecore_ptt *p_ptt)
1982 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1983 enum _ecore_status_t rc = ECORE_SUCCESS;
1987 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1989 /* Read Messages from MFW */
1990 ecore_mcp_read_mb(p_hwfn, p_ptt);
1992 /* Compare current messages to old ones */
1993 for (i = 0; i < info->mfw_mb_length; i++) {
1994 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1999 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2000 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
2001 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2004 case MFW_DRV_MSG_LINK_CHANGE:
2005 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
2007 case MFW_DRV_MSG_VF_DISABLED:
2008 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
2010 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2011 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2012 ECORE_DCBX_REMOTE_LLDP_MIB);
2014 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2015 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2016 ECORE_DCBX_REMOTE_MIB);
2018 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2019 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2020 ECORE_DCBX_OPERATIONAL_MIB);
2021 /* clear the user-config cache */
2022 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
2023 sizeof(struct ecore_dcbx_set));
2025 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
2026 ecore_lldp_mib_update_event(p_hwfn, p_ptt);
2028 case MFW_DRV_MSG_OEM_CFG_UPDATE:
2029 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
2031 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2032 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2034 case MFW_DRV_MSG_ERROR_RECOVERY:
2035 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2037 case MFW_DRV_MSG_GET_LAN_STATS:
2038 case MFW_DRV_MSG_GET_FCOE_STATS:
2039 case MFW_DRV_MSG_GET_ISCSI_STATS:
2040 case MFW_DRV_MSG_GET_RDMA_STATS:
2041 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2043 case MFW_DRV_MSG_BW_UPDATE:
2044 ecore_mcp_update_bw(p_hwfn, p_ptt);
2046 case MFW_DRV_MSG_FAILURE_DETECTED:
2047 ecore_mcp_handle_fan_failure(p_hwfn);
2049 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2050 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2053 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2058 /* ACK everything */
2059 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2060 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2062 /* The MFW expects the answer in BE, so force the write in that format */
2063 ecore_wr(p_hwfn, p_ptt,
2064 info->mfw_mb_addr + sizeof(u32) +
2065 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2066 sizeof(u32) + i * sizeof(u32), val);
2070 DP_NOTICE(p_hwfn, false,
2071 "Received an MFW message indication but no"
2076 /* Copy the new mfw messages into the shadow */
2077 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2082 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2083 struct ecore_ptt *p_ptt,
2085 u32 *p_running_bundle_id)
2090 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2091 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2092 return ECORE_SUCCESS;
2096 if (IS_VF(p_hwfn->p_dev)) {
2097 if (p_hwfn->vf_iov_info) {
2098 struct pfvf_acquire_resp_tlv *p_resp;
2100 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2101 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2102 return ECORE_SUCCESS;
2104 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2105 "VF requested MFW version prior to ACQUIRE\n");
2110 global_offsize = ecore_rd(p_hwfn, p_ptt,
2111 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
2115 ecore_rd(p_hwfn, p_ptt,
2116 SECTION_ADDR(global_offsize,
2117 0) + OFFSETOF(struct public_global, mfw_ver));
2119 if (p_running_bundle_id != OSAL_NULL) {
2120 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2121 SECTION_ADDR(global_offsize,
2123 OFFSETOF(struct public_global,
2124 running_bundle_id));
2127 return ECORE_SUCCESS;
2130 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2131 struct ecore_ptt *p_ptt,
2134 enum _ecore_status_t rc = ECORE_SUCCESS;
2136 /* TODO - Add support for VFs */
2137 if (IS_VF(p_hwfn->p_dev))
2140 if (!ecore_mcp_is_init(p_hwfn)) {
2141 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2146 *p_media_type = MEDIA_UNSPECIFIED;
2149 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2150 p_hwfn->mcp_info->port_addr +
2151 OFFSETOF(struct public_port,
2155 return ECORE_SUCCESS;
2158 enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
2159 struct ecore_ptt *p_ptt,
2160 u32 *p_transceiver_type)
2162 enum _ecore_status_t rc = ECORE_SUCCESS;
2164 /* TODO - Add support for VFs */
2165 if (IS_VF(p_hwfn->p_dev))
2168 if (!ecore_mcp_is_init(p_hwfn)) {
2169 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2173 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2176 *p_transceiver_type = ecore_rd(p_hwfn, p_ptt,
2177 p_hwfn->mcp_info->port_addr +
2178 offsetof(struct public_port,
2185 static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
2187 if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2188 ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2189 (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
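/* ecore_mcp_trans_speed_mask() below derives the driver speed-capability
 * mask from the transceiver type reported by the MFW; unknown types fall
 * back to 0xff (all speeds allowed).
 */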
2195 enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
2196 struct ecore_ptt *p_ptt,
2199 u32 transceiver_data, transceiver_type, transceiver_state;
2201 ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
2203 transceiver_state = GET_MFW_FIELD(transceiver_data,
2204 ETH_TRANSCEIVER_STATE);
2206 transceiver_type = GET_MFW_FIELD(transceiver_data,
2207 ETH_TRANSCEIVER_TYPE);
2209 if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
2212 switch (transceiver_type) {
2213 case ETH_TRANSCEIVER_TYPE_1G_LX:
2214 case ETH_TRANSCEIVER_TYPE_1G_SX:
2215 case ETH_TRANSCEIVER_TYPE_1G_PCC:
2216 case ETH_TRANSCEIVER_TYPE_1G_ACC:
2217 case ETH_TRANSCEIVER_TYPE_1000BASET:
2218 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2221 case ETH_TRANSCEIVER_TYPE_10G_SR:
2222 case ETH_TRANSCEIVER_TYPE_10G_LR:
2223 case ETH_TRANSCEIVER_TYPE_10G_LRM:
2224 case ETH_TRANSCEIVER_TYPE_10G_ER:
2225 case ETH_TRANSCEIVER_TYPE_10G_PCC:
2226 case ETH_TRANSCEIVER_TYPE_10G_ACC:
2227 case ETH_TRANSCEIVER_TYPE_4x10G:
2228 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2231 case ETH_TRANSCEIVER_TYPE_40G_LR4:
2232 case ETH_TRANSCEIVER_TYPE_40G_SR4:
2233 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2234 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2235 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2236 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2239 case ETH_TRANSCEIVER_TYPE_100G_AOC:
2240 case ETH_TRANSCEIVER_TYPE_100G_SR4:
2241 case ETH_TRANSCEIVER_TYPE_100G_LR4:
2242 case ETH_TRANSCEIVER_TYPE_100G_ER4:
2243 case ETH_TRANSCEIVER_TYPE_100G_ACC:
2245 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2246 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2249 case ETH_TRANSCEIVER_TYPE_25G_SR:
2250 case ETH_TRANSCEIVER_TYPE_25G_LR:
2251 case ETH_TRANSCEIVER_TYPE_25G_AOC:
2252 case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2253 case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2254 case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2255 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2258 case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2259 case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2260 case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2261 case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2262 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2263 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2264 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2267 case ETH_TRANSCEIVER_TYPE_40G_CR4:
2268 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2269 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2270 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2271 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2274 case ETH_TRANSCEIVER_TYPE_100G_CR4:
2275 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2277 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2278 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2279 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2280 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2281 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2282 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2283 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2286 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2287 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2288 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2290 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2291 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2292 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2293 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2296 case ETH_TRANSCEIVER_TYPE_XLPPI:
2297 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2300 case ETH_TRANSCEIVER_TYPE_10G_BASET:
2301 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2302 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2306 DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2308 *p_speed_mask = 0xff;
2312 return ECORE_SUCCESS;
2315 enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
2316 struct ecore_ptt *p_ptt,
2317 u32 *p_board_config)
2319 u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2320 enum _ecore_status_t rc = ECORE_SUCCESS;
2322 /* TODO - Add support for VFs */
2323 if (IS_VF(p_hwfn->p_dev))
2326 if (!ecore_mcp_is_init(p_hwfn)) {
2327 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2331 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2334 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
2335 MISC_REG_GEN_PURP_CR0);
2336 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
2338 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2339 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2340 *p_board_config = ecore_rd(p_hwfn, p_ptt,
2342 offsetof(struct nvm_cfg1_port,
2350 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2352 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2353 enum ecore_pci_personality *p_proto)
2355 *p_proto = ECORE_PCI_ETH;
2357 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2358 "According to Legacy capabilities, L2 personality is %08x\n",
2363 static enum _ecore_status_t
2364 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2365 struct ecore_ptt *p_ptt,
2366 enum ecore_pci_personality *p_proto)
2368 u32 resp = 0, param = 0;
2369 enum _ecore_status_t rc;
2371 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2372 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2373 (u32)*p_proto, resp, param);
2374 return ECORE_SUCCESS;
2377 static enum _ecore_status_t
2378 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2379 struct public_func *p_info,
2380 struct ecore_ptt *p_ptt,
2381 enum ecore_pci_personality *p_proto)
2383 enum _ecore_status_t rc = ECORE_SUCCESS;
2385 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2386 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2387 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2389 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2398 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2399 struct ecore_ptt *p_ptt)
2401 struct ecore_mcp_function_info *info;
2402 struct public_func shmem_info;
2404 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2405 info = &p_hwfn->mcp_info->func_info;
2407 info->pause_on_host = (shmem_info.config &
2408 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2410 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2412 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2413 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2417 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
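/* The MAC is stored in shmem as two words: the low 16 bits of mac_upper
 * hold bytes 0-1 and mac_lower holds bytes 2-5.
 */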
2419 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2420 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2421 info->mac[1] = (u8)(shmem_info.mac_upper);
2422 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2423 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2424 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2425 info->mac[5] = (u8)(shmem_info.mac_lower);
2427 /* TODO - are there protocols for which there's no MAC? */
2428 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2431 /* TODO - are these calculations correct on a BE machine? */
2432 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2433 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2434 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2435 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2437 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2439 info->mtu = (u16)shmem_info.mtu_size;
2444 info->mtu = (u16)shmem_info.mtu_size;
2446 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2447 "Read configuration from shmem: pause_on_host %02x"
2448 " protocol %02x BW [%02x - %02x]"
2449 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
2450 " node %lx ovlan %04x\n",
2451 info->pause_on_host, info->protocol,
2452 info->bandwidth_min, info->bandwidth_max,
2453 info->mac[0], info->mac[1], info->mac[2],
2454 info->mac[3], info->mac[4], info->mac[5],
2455 (unsigned long)info->wwn_port,
2456 (unsigned long)info->wwn_node, info->ovlan);
2458 return ECORE_SUCCESS;
2461 struct ecore_mcp_link_params
2462 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2464 if (!p_hwfn || !p_hwfn->mcp_info)
2466 return &p_hwfn->mcp_info->link_input;
2469 struct ecore_mcp_link_state
2470 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2472 if (!p_hwfn || !p_hwfn->mcp_info)
2476 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2477 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2478 p_hwfn->mcp_info->link_output.link_up = true;
2482 return &p_hwfn->mcp_info->link_output;
2485 struct ecore_mcp_link_capabilities
2486 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2488 if (!p_hwfn || !p_hwfn->mcp_info)
2490 return &p_hwfn->mcp_info->link_capabilities;
2493 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2494 struct ecore_ptt *p_ptt)
2496 u32 resp = 0, param = 0;
2497 enum _ecore_status_t rc;
2499 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2500 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2502 /* Wait for the drain to complete before returning */
2508 const struct ecore_mcp_function_info
2509 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2511 if (!p_hwfn || !p_hwfn->mcp_info)
2513 return &p_hwfn->mcp_info->func_info;
2516 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2517 struct ecore_ptt *p_ptt, u32 personalities)
2519 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2520 struct public_func shmem_info;
2521 int i, count = 0, num_pfs;
2523 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2525 for (i = 0; i < num_pfs; i++) {
2526 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2527 MCP_PF_ID_BY_REL(p_hwfn, i));
2528 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2531 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2536 if ((1 << ((u32)protocol)) & personalities)
2543 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2544 struct ecore_ptt *p_ptt,
2550 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2551 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2556 if (IS_VF(p_hwfn->p_dev))
2559 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
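/* The NVM_CFG4 field encodes the flash size as a power-of-two number of
 * Mbit; adding MCP_BYTES_PER_MBIT_OFFSET to the exponent converts it to
 * bytes.
 */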
2560 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2561 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2562 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2564 *p_flash_size = flash_size;
2566 return ECORE_SUCCESS;
2569 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2570 struct ecore_ptt *p_ptt)
2572 struct ecore_dev *p_dev = p_hwfn->p_dev;
2574 if (p_dev->recov_in_prog) {
2575 DP_NOTICE(p_hwfn, false,
2576 "Avoid triggering a recovery since such a process"
2577 " is already in progress\n");
2581 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2582 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2584 return ECORE_SUCCESS;
2587 static enum _ecore_status_t
2588 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2589 struct ecore_ptt *p_ptt,
2592 u32 resp = 0, param = 0, rc_param = 0;
2593 enum _ecore_status_t rc;
2595 /* Only the lead hwfn can configure MSI-X, and CMT must be taken into account */
2597 if (!IS_LEAD_HWFN(p_hwfn))
2598 return ECORE_SUCCESS;
2599 num *= p_hwfn->p_dev->num_hwfns;
2601 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2602 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2603 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2604 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2606 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2609 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2610 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2614 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2615 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2622 static enum _ecore_status_t
2623 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2624 struct ecore_ptt *p_ptt,
2627 u32 resp = 0, param = num, rc_param = 0;
2628 enum _ecore_status_t rc;
2630 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2631 param, &resp, &rc_param);
2633 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2634 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2637 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2638 "Requested 0x%02x MSI-x interrupts for VFs\n",
2645 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2646 struct ecore_ptt *p_ptt,
2649 if (ECORE_IS_BB(p_hwfn->p_dev))
2650 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2652 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2655 enum _ecore_status_t
2656 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2657 struct ecore_mcp_drv_version *p_ver)
2659 struct ecore_mcp_mb_params mb_params;
2660 struct drv_version_stc drv_version;
2664 enum _ecore_status_t rc;
2667 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2668 return ECORE_SUCCESS;
2671 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2672 drv_version.version = p_ver->version;
2673 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2674 for (i = 0; i < num_words; i++) {
2675 /* The driver name is expected to be in a big-endian format */
2676 p_name = &p_ver->name[i * sizeof(u32)];
2677 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2678 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2681 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2682 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2683 mb_params.p_data_src = &drv_version;
2684 mb_params.data_src_size = sizeof(drv_version);
2685 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2686 if (rc != ECORE_SUCCESS)
2687 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2692 /* Allow at most 100 msec for the MCP to halt */
2693 #define ECORE_MCP_HALT_SLEEP_MS 10
2694 #define ECORE_MCP_HALT_MAX_RETRIES 10
2696 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2697 struct ecore_ptt *p_ptt)
2699 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2700 enum _ecore_status_t rc;
2702 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2704 if (rc != ECORE_SUCCESS) {
2705 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2710 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2711 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2712 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2714 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2716 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2717 DP_NOTICE(p_hwfn, false,
2718 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2719 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2723 ecore_mcp_cmd_set_blocking(p_hwfn, true);
2725 return ECORE_SUCCESS;
2728 #define ECORE_MCP_RESUME_SLEEP_MS 10
2730 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2731 struct ecore_ptt *p_ptt)
2733 u32 cpu_mode, cpu_state;
2735 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2737 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2738 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2739 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2741 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2742 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2744 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2745 DP_NOTICE(p_hwfn, false,
2746 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2747 cpu_mode, cpu_state);
2751 ecore_mcp_cmd_set_blocking(p_hwfn, false);
2753 return ECORE_SUCCESS;
2756 enum _ecore_status_t
2757 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2758 struct ecore_ptt *p_ptt,
2759 enum ecore_ov_client client)
2761 u32 resp = 0, param = 0;
2763 enum _ecore_status_t rc;
2766 case ECORE_OV_CLIENT_DRV:
2767 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2769 case ECORE_OV_CLIENT_USER:
2770 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2772 case ECORE_OV_CLIENT_VENDOR_SPEC:
2773 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2776 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2780 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2781 drv_mb_param, &resp, &param);
2782 if (rc != ECORE_SUCCESS)
2783 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2788 enum _ecore_status_t
2789 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2790 struct ecore_ptt *p_ptt,
2791 enum ecore_ov_driver_state drv_state)
2793 u32 resp = 0, param = 0;
2795 enum _ecore_status_t rc;
2797 switch (drv_state) {
2798 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2799 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2801 case ECORE_OV_DRIVER_STATE_DISABLED:
2802 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2804 case ECORE_OV_DRIVER_STATE_ACTIVE:
2805 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2808 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2812 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2813 drv_mb_param, &resp, &param);
2814 if (rc != ECORE_SUCCESS)
2815 DP_ERR(p_hwfn, "Failed to send driver state\n");
2820 enum _ecore_status_t
2821 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2822 struct ecore_fc_npiv_tbl *p_table)
2827 enum _ecore_status_t
2828 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
2829 struct ecore_ptt *p_ptt, u16 mtu)
2834 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2835 struct ecore_ptt *p_ptt,
2836 enum ecore_led_mode mode)
2838 u32 resp = 0, param = 0, drv_mb_param;
2839 enum _ecore_status_t rc;
2842 case ECORE_LED_MODE_ON:
2843 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2845 case ECORE_LED_MODE_OFF:
2846 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2848 case ECORE_LED_MODE_RESTORE:
2849 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2852 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2856 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2857 drv_mb_param, &resp, &param);
2858 if (rc != ECORE_SUCCESS)
2859 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2864 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2865 struct ecore_ptt *p_ptt,
2868 u32 resp = 0, param = 0;
2869 enum _ecore_status_t rc;
2871 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2872 mask_parities, &resp, &param);
2874 if (rc != ECORE_SUCCESS) {
2876 "MCP response failure for mask parities, aborting\n");
2877 } else if (resp != FW_MSG_CODE_OK) {
2879 "MCP did not ack mask parity request. Old MFW?\n");
2886 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2889 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2890 u32 bytes_left, offset, bytes_to_copy, buf_size;
2891 u32 nvm_offset, resp, param;
2892 struct ecore_ptt *p_ptt;
2893 enum _ecore_status_t rc = ECORE_SUCCESS;
2895 p_ptt = ecore_ptt_acquire(p_hwfn);
2901 while (bytes_left > 0) {
2902 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2903 MCP_DRV_NVM_BUF_LEN);
2904 nvm_offset = (addr + offset) | (bytes_to_copy <<
2905 DRV_MB_PARAM_NVM_LEN_OFFSET);
2906 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2907 DRV_MSG_CODE_NVM_READ_NVRAM,
2908 nvm_offset, &resp, &param, &buf_size,
2909 (u32 *)(p_buf + offset));
2910 if (rc != ECORE_SUCCESS) {
2911 DP_NOTICE(p_dev, false,
2912 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
2914 resp = FW_MSG_CODE_ERROR;
2918 if (resp != FW_MSG_CODE_NVM_OK) {
2919 DP_NOTICE(p_dev, false,
2920 "nvm read failed, resp = 0x%08x\n", resp);
2921 rc = ECORE_UNKNOWN_ERROR;
2925 /* This can be a lengthy process, and the scheduler may not be
2926 * preemptible. Sleep a bit to prevent CPU hogging.
2928 if (bytes_left % 0x1000 <
2929 (bytes_left - buf_size) % 0x1000)
2933 bytes_left -= buf_size;
2936 p_dev->mcp_nvm_resp = resp;
2937 ecore_ptt_release(p_hwfn, p_ptt);
2942 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2943 u32 addr, u8 *p_buf, u32 len)
2945 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2946 struct ecore_ptt *p_ptt;
2948 enum _ecore_status_t rc;
2950 p_ptt = ecore_ptt_acquire(p_hwfn);
2954 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2955 (cmd == ECORE_PHY_CORE_READ) ?
2956 DRV_MSG_CODE_PHY_CORE_READ :
2957 DRV_MSG_CODE_PHY_RAW_READ,
2958 addr, &resp, &param, &len, (u32 *)p_buf);
2959 if (rc != ECORE_SUCCESS)
2960 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2962 p_dev->mcp_nvm_resp = resp;
2963 ecore_ptt_release(p_hwfn, p_ptt);
2968 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2970 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2971 struct ecore_ptt *p_ptt;
2973 p_ptt = ecore_ptt_acquire(p_hwfn);
2977 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2978 ecore_ptt_release(p_hwfn, p_ptt);
2980 return ECORE_SUCCESS;
2983 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2985 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2986 struct ecore_ptt *p_ptt;
2988 enum _ecore_status_t rc;
2990 p_ptt = ecore_ptt_acquire(p_hwfn);
2993 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
2995 p_dev->mcp_nvm_resp = resp;
2996 ecore_ptt_release(p_hwfn, p_ptt);
3001 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
3004 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3005 struct ecore_ptt *p_ptt;
3007 enum _ecore_status_t rc;
3009 p_ptt = ecore_ptt_acquire(p_hwfn);
3012 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
3014 p_dev->mcp_nvm_resp = resp;
3015 ecore_ptt_release(p_hwfn, p_ptt);
3020 /* rc defaults to ECORE_INVAL because the while loop below might not be
3021 * entered at all if len is 0
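/* Writes mirror the read path: data is pushed in MCP_DRV_NVM_BUF_LEN
 * chunks with the chunk size and offset packed into the mailbox param.
 * Besides FW_MSG_CODE_OK/NVM_OK, FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK is
 * treated as success (presumably returned when a file transfer completes).
 */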
3023 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3024 u32 addr, u8 *p_buf, u32 len)
3026 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
3027 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3028 enum _ecore_status_t rc = ECORE_INVAL;
3029 struct ecore_ptt *p_ptt;
3031 p_ptt = ecore_ptt_acquire(p_hwfn);
3036 case ECORE_PUT_FILE_DATA:
3037 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3039 case ECORE_NVM_WRITE_NVRAM:
3040 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3042 case ECORE_EXT_PHY_FW_UPGRADE:
3043 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3046 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3053 while (buf_idx < len) {
3054 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3055 MCP_DRV_NVM_BUF_LEN);
3056 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
3059 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3060 &resp, &param, buf_size,
3061 (u32 *)&p_buf[buf_idx]);
3062 if (rc != ECORE_SUCCESS) {
3063 DP_NOTICE(p_dev, false,
3064 "ecore_mcp_nvm_write() failed, rc = %d\n",
3066 resp = FW_MSG_CODE_ERROR;
3070 if (resp != FW_MSG_CODE_OK &&
3071 resp != FW_MSG_CODE_NVM_OK &&
3072 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3073 DP_NOTICE(p_dev, false,
3074 "nvm write failed, resp = 0x%08x\n", resp);
3075 rc = ECORE_UNKNOWN_ERROR;
3079 /* This can be a lengthy process, and the scheduler may not be
3080 * preemptible. Sleep a bit to prevent CPU hogging.
3082 if (buf_idx % 0x1000 >
3083 (buf_idx + buf_size) % 0x1000)
3086 buf_idx += buf_size;
3089 p_dev->mcp_nvm_resp = resp;
3091 ecore_ptt_release(p_hwfn, p_ptt);
3096 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3097 u32 addr, u8 *p_buf, u32 len)
3099 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3100 struct ecore_ptt *p_ptt;
3101 u32 resp, param, nvm_cmd;
3102 enum _ecore_status_t rc;
3104 p_ptt = ecore_ptt_acquire(p_hwfn);
3108 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
3109 DRV_MSG_CODE_PHY_RAW_WRITE;
3110 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
3111 &resp, &param, len, (u32 *)p_buf);
3112 if (rc != ECORE_SUCCESS)
3113 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3114 p_dev->mcp_nvm_resp = resp;
3115 ecore_ptt_release(p_hwfn, p_ptt);
3120 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3123 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3124 struct ecore_ptt *p_ptt;
3126 enum _ecore_status_t rc;
3128 p_ptt = ecore_ptt_acquire(p_hwfn);
3132 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
3134 p_dev->mcp_nvm_resp = resp;
3135 ecore_ptt_release(p_hwfn, p_ptt);
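/* Transceiver (SFP) accesses go through the MFW I2C interface: the port,
 * I2C address, register offset and transfer size are packed into the
 * mailbox param, with each transaction limited to MAX_I2C_TRANSACTION_SIZE
 * bytes.
 */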
3140 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3141 struct ecore_ptt *p_ptt,
3142 u32 port, u32 addr, u32 offset,
3145 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
3147 enum _ecore_status_t rc;
3149 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3150 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3154 while (bytes_left > 0) {
3155 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3156 MAX_I2C_TRANSACTION_SIZE);
3157 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3158 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3159 nvm_offset |= ((addr + offset) <<
3160 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3161 nvm_offset |= (bytes_to_copy <<
3162 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3163 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3164 DRV_MSG_CODE_TRANSCEIVER_READ,
3165 nvm_offset, &resp, &param, &buf_size,
3166 (u32 *)(p_buf + offset));
3167 if (rc != ECORE_SUCCESS) {
3168 DP_NOTICE(p_hwfn, false,
3169 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3174 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3176 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3177 return ECORE_UNKNOWN_ERROR;
3180 bytes_left -= buf_size;
3183 return ECORE_SUCCESS;
3186 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3187 struct ecore_ptt *p_ptt,
3188 u32 port, u32 addr, u32 offset,
3191 u32 buf_idx, buf_size, nvm_offset, resp, param;
3192 enum _ecore_status_t rc;
3194 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3195 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3197 while (buf_idx < len) {
3198 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3199 MAX_I2C_TRANSACTION_SIZE);
3200 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3201 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3202 nvm_offset |= ((offset + buf_idx) <<
3203 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3204 nvm_offset |= (buf_size <<
3205 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3206 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3207 DRV_MSG_CODE_TRANSCEIVER_WRITE,
3208 nvm_offset, &resp, &param, buf_size,
3209 (u32 *)&p_buf[buf_idx]);
3210 if (rc != ECORE_SUCCESS) {
3211 DP_NOTICE(p_hwfn, false,
3212 "Failed to send a transceiver write command to the MFW. rc = %d.\n",
3217 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3219 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3220 return ECORE_UNKNOWN_ERROR;
3222 buf_idx += buf_size;
3225 return ECORE_SUCCESS;
3228 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3229 struct ecore_ptt *p_ptt,
3230 u16 gpio, u32 *gpio_val)
3232 enum _ecore_status_t rc = ECORE_SUCCESS;
3233 u32 drv_mb_param = 0, rsp;
3235 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3237 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3238 drv_mb_param, &rsp, gpio_val);
3240 if (rc != ECORE_SUCCESS)
3243 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3244 return ECORE_UNKNOWN_ERROR;
3246 return ECORE_SUCCESS;
3249 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3250 struct ecore_ptt *p_ptt,
3251 u16 gpio, u16 gpio_val)
3253 enum _ecore_status_t rc = ECORE_SUCCESS;
3254 u32 drv_mb_param = 0, param, rsp;
3256 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3257 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3259 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3260 drv_mb_param, &rsp, &param);
3262 if (rc != ECORE_SUCCESS)
3265 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3266 return ECORE_UNKNOWN_ERROR;
3268 return ECORE_SUCCESS;
3271 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3272 struct ecore_ptt *p_ptt,
3273 u16 gpio, u32 *gpio_direction,
3276 u32 drv_mb_param = 0, rsp, val = 0;
3277 enum _ecore_status_t rc = ECORE_SUCCESS;
3279 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3281 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3282 drv_mb_param, &rsp, &val);
3283 if (rc != ECORE_SUCCESS)
3286 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3287 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3288 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3289 DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3291 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3292 return ECORE_UNKNOWN_ERROR;
3294 return ECORE_SUCCESS;
3297 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3298 struct ecore_ptt *p_ptt)
3300 u32 drv_mb_param = 0, rsp, param;
3301 enum _ecore_status_t rc = ECORE_SUCCESS;
3303 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3304 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3306 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3307 drv_mb_param, &rsp, &param);
3309 if (rc != ECORE_SUCCESS)
3312 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3313 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3314 rc = ECORE_UNKNOWN_ERROR;
3319 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3320 struct ecore_ptt *p_ptt)
3322 u32 drv_mb_param, rsp, param;
3323 enum _ecore_status_t rc = ECORE_SUCCESS;
3325 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3326 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3328 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3329 drv_mb_param, &rsp, &param);
3331 if (rc != ECORE_SUCCESS)
3334 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3335 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3336 rc = ECORE_UNKNOWN_ERROR;
3341 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3342 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3344 u32 drv_mb_param = 0, rsp;
3345 enum _ecore_status_t rc = ECORE_SUCCESS;
3347 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3348 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3350 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3351 drv_mb_param, &rsp, num_images);
3353 if (rc != ECORE_SUCCESS)
3356 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3357 rc = ECORE_UNKNOWN_ERROR;
3362 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3363 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3364 struct bist_nvm_image_att *p_image_att, u32 image_index)
3366 u32 buf_size, nvm_offset, resp, param;
3367 enum _ecore_status_t rc;
3369 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3370 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3371 nvm_offset |= (image_index <<
3372 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3373 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3374 nvm_offset, &resp, &param, &buf_size,
3375 (u32 *)p_image_att);
3376 if (rc != ECORE_SUCCESS)
3379 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3380 (p_image_att->return_code != 1))
3381 rc = ECORE_UNKNOWN_ERROR;
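/* Each 32-bit sensor word returned by the MFW packs the sensor location,
 * the high and critical thresholds and the current temperature; they are
 * unpacked below with the corresponding masks and offsets.
 */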
3386 enum _ecore_status_t
3387 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3388 struct ecore_ptt *p_ptt,
3389 struct ecore_temperature_info *p_temp_info)
3391 struct ecore_temperature_sensor *p_temp_sensor;
3392 struct temperature_status_stc mfw_temp_info;
3393 struct ecore_mcp_mb_params mb_params;
3395 enum _ecore_status_t rc;
3398 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3399 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3400 mb_params.p_data_dst = &mfw_temp_info;
3401 mb_params.data_dst_size = sizeof(mfw_temp_info);
3402 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3403 if (rc != ECORE_SUCCESS)
3406 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3407 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3408 ECORE_MAX_NUM_OF_SENSORS);
3409 for (i = 0; i < p_temp_info->num_sensors; i++) {
3410 val = mfw_temp_info.sensor[i];
3411 p_temp_sensor = &p_temp_info->sensors[i];
3412 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3413 SENSOR_LOCATION_OFFSET;
3414 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3415 THRESHOLD_HIGH_OFFSET;
3416 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3417 CRITICAL_TEMPERATURE_OFFSET;
3418 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3419 CURRENT_TEMP_OFFSET;
3422 return ECORE_SUCCESS;
3425 enum _ecore_status_t ecore_mcp_get_mba_versions(
3426 struct ecore_hwfn *p_hwfn,
3427 struct ecore_ptt *p_ptt,
3428 struct ecore_mba_vers *p_mba_vers)
3430 u32 buf_size, resp, param;
3431 enum _ecore_status_t rc;
3433 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3434 0, &resp, &param, &buf_size,
3435 &p_mba_vers->mba_vers[0]);
3437 if (rc != ECORE_SUCCESS)
3440 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3441 rc = ECORE_UNKNOWN_ERROR;
3443 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3444 rc = ECORE_UNKNOWN_ERROR;
3449 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3450 struct ecore_ptt *p_ptt,
3455 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3456 0, &rsp, (u32 *)num_events);
3459 static enum resource_id_enum
3460 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3462 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3466 mfw_res_id = RESOURCE_NUM_SB_E;
3468 case ECORE_L2_QUEUE:
3469 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3472 mfw_res_id = RESOURCE_NUM_VPORT_E;
3475 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3478 mfw_res_id = RESOURCE_NUM_PQ_E;
3481 mfw_res_id = RESOURCE_NUM_RL_E;
3485 /* Each VFC resource can accommodate both a MAC and a VLAN */
3486 mfw_res_id = RESOURCE_VFC_FILTER_E;
3489 mfw_res_id = RESOURCE_ILT_E;
3491 case ECORE_LL2_QUEUE:
3492 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3494 case ECORE_RDMA_CNQ_RAM:
3495 case ECORE_CMDQS_CQS:
3496 /* CNQ/CMDQS are the same resource */
3497 mfw_res_id = RESOURCE_CQS_E;
3499 case ECORE_RDMA_STATS_QUEUE:
3500 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3503 mfw_res_id = RESOURCE_BDQ_E;
3512 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3513 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3514 #define ECORE_RESC_ALLOC_VERSION \
3515 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3516 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
3517 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3518 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3520 struct ecore_resc_alloc_in_params {
3522 enum ecore_resources res_id;
3526 struct ecore_resc_alloc_out_params {
3536 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
3538 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3540 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3541 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3542 enum _ecore_status_t rc;
3544 /* Allow ongoing PCIe transactions to complete */
3545 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3547 /* Clear the PF's internal FID_enable in the PXP */
3548 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3549 if (rc != ECORE_SUCCESS)
3550 DP_NOTICE(p_hwfn, false,
3551 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
3557 static enum _ecore_status_t
3558 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3559 struct ecore_ptt *p_ptt,
3560 struct ecore_resc_alloc_in_params *p_in_params,
3561 struct ecore_resc_alloc_out_params *p_out_params)
3563 struct ecore_mcp_mb_params mb_params;
3564 struct resource_info mfw_resc_info;
3565 enum _ecore_status_t rc;
3567 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3569 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3570 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3572 "Failed to match resource %d [%s] with the MFW resources\n",
3573 p_in_params->res_id,
3574 ecore_hw_get_resc_name(p_in_params->res_id));
3578 switch (p_in_params->cmd) {
3579 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3580 mfw_resc_info.size = p_in_params->resc_max_val;
3582 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3585 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3590 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3591 mb_params.cmd = p_in_params->cmd;
3592 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3593 mb_params.p_data_src = &mfw_resc_info;
3594 mb_params.data_src_size = sizeof(mfw_resc_info);
3595 mb_params.p_data_dst = mb_params.p_data_src;
3596 mb_params.data_dst_size = mb_params.data_src_size;
3598 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3599 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3600 p_in_params->cmd, p_in_params->res_id,
3601 ecore_hw_get_resc_name(p_in_params->res_id),
3602 GET_MFW_FIELD(mb_params.param,
3603 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3604 GET_MFW_FIELD(mb_params.param,
3605 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3606 p_in_params->resc_max_val);
3608 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3609 if (rc != ECORE_SUCCESS)
3612 p_out_params->mcp_resp = mb_params.mcp_resp;
3613 p_out_params->mcp_param = mb_params.mcp_param;
3614 p_out_params->resc_num = mfw_resc_info.size;
3615 p_out_params->resc_start = mfw_resc_info.offset;
3616 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3617 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3618 p_out_params->flags = mfw_resc_info.flags;
3620 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3621 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3622 GET_MFW_FIELD(p_out_params->mcp_param,
3623 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3624 GET_MFW_FIELD(p_out_params->mcp_param,
3625 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3626 p_out_params->resc_num, p_out_params->resc_start,
3627 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3628 p_out_params->flags);
3630 return ECORE_SUCCESS;
3633 enum _ecore_status_t
3634 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3635 enum ecore_resources res_id, u32 resc_max_val,
3638 struct ecore_resc_alloc_out_params out_params;
3639 struct ecore_resc_alloc_in_params in_params;
3640 enum _ecore_status_t rc;
3642 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3643 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3644 in_params.res_id = res_id;
3645 in_params.resc_max_val = resc_max_val;
3646 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3647 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3649 if (rc != ECORE_SUCCESS)
3652 *p_mcp_resp = out_params.mcp_resp;
3654 return ECORE_SUCCESS;
3657 enum _ecore_status_t
3658 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3659 enum ecore_resources res_id, u32 *p_mcp_resp,
3660 u32 *p_resc_num, u32 *p_resc_start)
3662 struct ecore_resc_alloc_out_params out_params;
3663 struct ecore_resc_alloc_in_params in_params;
3664 enum _ecore_status_t rc;
3666 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3667 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3668 in_params.res_id = res_id;
3669 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3670 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3672 if (rc != ECORE_SUCCESS)
3675 *p_mcp_resp = out_params.mcp_resp;
3677 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3678 *p_resc_num = out_params.resc_num;
3679 *p_resc_start = out_params.resc_start;
3682 return ECORE_SUCCESS;
3685 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3686 struct ecore_ptt *p_ptt)
3688 u32 mcp_resp, mcp_param;
3690 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3691 &mcp_resp, &mcp_param);
3694 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3695 struct ecore_ptt *p_ptt,
3696 u32 param, u32 *p_mcp_resp,
3699 enum _ecore_status_t rc;
3701 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3702 p_mcp_resp, p_mcp_param);
3703 if (rc != ECORE_SUCCESS)
3706 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3708 "The resource command is unsupported by the MFW\n");
3709 return ECORE_NOTIMPL;
3712 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3713 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3715 DP_NOTICE(p_hwfn, false,
3716 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3724 enum _ecore_status_t
3725 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3726 struct ecore_resc_lock_params *p_params)
3728 u32 param = 0, mcp_resp, mcp_param;
3730 enum _ecore_status_t rc;
3732 switch (p_params->timeout) {
3733 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3734 opcode = RESOURCE_OPCODE_REQ;
3735 p_params->timeout = 0;
3737 case ECORE_MCP_RESC_LOCK_TO_NONE:
3738 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3739 p_params->timeout = 0;
3742 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3746 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3747 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3748 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3750 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3751 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3752 param, p_params->timeout, opcode, p_params->resource);
3754 /* Attempt to acquire the resource */
3755 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3757 if (rc != ECORE_SUCCESS)
3760 /* Analyze the response */
3761 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3762 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3764 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3765 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3766 mcp_param, opcode, p_params->owner);
3769 case RESOURCE_OPCODE_GNT:
3770 p_params->b_granted = true;
3772 case RESOURCE_OPCODE_BUSY:
3773 p_params->b_granted = false;
3776 DP_NOTICE(p_hwfn, false,
3777 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3782 return ECORE_SUCCESS;
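/* The wrapper below retries __ecore_mcp_resc_lock() up to retry_num times,
 * waiting retry_interval between attempts (OSAL_MSLEEP with the interval
 * rounded up to milliseconds, or OSAL_UDELAY). Note that it returns
 * ECORE_SUCCESS even when the lock was never granted, so callers must
 * check b_granted.
 */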
3785 enum _ecore_status_t
3786 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3787 struct ecore_resc_lock_params *p_params)
3790 enum _ecore_status_t rc;
3793 /* No need for an interval before the first iteration */
3795 if (p_params->sleep_b4_retry) {
3796 u16 retry_interval_in_ms =
3797 DIV_ROUND_UP(p_params->retry_interval,
3800 OSAL_MSLEEP(retry_interval_in_ms);
3802 OSAL_UDELAY(p_params->retry_interval);
3806 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3807 if (rc != ECORE_SUCCESS)
3810 if (p_params->b_granted)
3812 } while (retry_cnt++ < p_params->retry_num);
3814 return ECORE_SUCCESS;
3817 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
3818 struct ecore_resc_unlock_params *p_unlock,
3819 enum ecore_resc_lock resource,
3820 bool b_is_permanent)
3822 if (p_lock != OSAL_NULL) {
3823 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
3825 /* Permanent resources don't require aging, and there's no
3826 * point in trying to acquire them more than once, since another
3827 * entity is not expected to release them.
3829 if (b_is_permanent) {
3830 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
3832 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3833 p_lock->retry_interval =
3834 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3835 p_lock->sleep_b4_retry = true;
3838 p_lock->resource = resource;
3841 if (p_unlock != OSAL_NULL) {
3842 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
3843 p_unlock->resource = resource;
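/* Illustrative usage sketch (not taken from a specific caller; `resource'
 * stands for a value of enum ecore_resc_lock):
 *
 *	struct ecore_resc_lock_params lock;
 *	struct ecore_resc_unlock_params unlock;
 *
 *	ecore_mcp_resc_lock_default_init(&lock, &unlock, resource, false);
 *	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock);
 *	if (rc != ECORE_SUCCESS || !lock.b_granted)
 *		return ECORE_BUSY;
 *	... access the shared resource ...
 *	ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 */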
3847 enum _ecore_status_t
3848 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3849 struct ecore_resc_unlock_params *p_params)
3851 u32 param = 0, mcp_resp, mcp_param;
3853 enum _ecore_status_t rc;
3855 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3856 : RESOURCE_OPCODE_RELEASE;
3857 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3858 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3860 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3861 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3862 param, opcode, p_params->resource);
3864 /* Attempt to release the resource */
3865 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3867 if (rc != ECORE_SUCCESS)
3870 /* Analyze the response */
3871 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3873 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3874 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3878 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3880 "Resource unlock request for an already released resource [%d]\n",
3881 p_params->resource);
3883 case RESOURCE_OPCODE_RELEASED:
3884 p_params->b_released = true;
3886 case RESOURCE_OPCODE_WRONG_OWNER:
3887 p_params->b_released = false;
3890 DP_NOTICE(p_hwfn, false,
3891 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3896 return ECORE_SUCCESS;
3899 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
3901 return !!(p_hwfn->mcp_info->capabilities &
3902 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3905 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
3906 struct ecore_ptt *p_ptt)
3909 enum _ecore_status_t rc;
3911 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3912 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3913 if (rc == ECORE_SUCCESS)
3914 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
3915 "MFW supported features: %08x\n",
3916 p_hwfn->mcp_info->capabilities);
3921 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
3922 struct ecore_ptt *p_ptt)
3924 u32 mcp_resp, mcp_param, features;
3926 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
3927 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3928 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
3930 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3931 features, &mcp_resp, &mcp_param);
3934 enum _ecore_status_t
3935 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3936 struct ecore_mcp_drv_attr *p_drv_attr)
3938 struct attribute_cmd_write_stc attr_cmd_write;
3939 enum _attribute_commands_e mfw_attr_cmd;
3940 struct ecore_mcp_mb_params mb_params;
3941 enum _ecore_status_t rc;
3943 switch (p_drv_attr->attr_cmd) {
3944 case ECORE_MCP_DRV_ATTR_CMD_READ:
3945 mfw_attr_cmd = ATTRIBUTE_CMD_READ;
3947 case ECORE_MCP_DRV_ATTR_CMD_WRITE:
3948 mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
3950 case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
3951 mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
3953 case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
3954 mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
3957 DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
3958 p_drv_attr->attr_cmd);
3962 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3963 mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
3964 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
3965 p_drv_attr->attr_num);
3966 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
3968 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
3969 OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
3970 attr_cmd_write.val = p_drv_attr->val;
3971 attr_cmd_write.mask = p_drv_attr->mask;
3972 attr_cmd_write.offset = p_drv_attr->offset;
3974 mb_params.p_data_src = &attr_cmd_write;
3975 mb_params.data_src_size = sizeof(attr_cmd_write);
3978 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3979 if (rc != ECORE_SUCCESS)
3982 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3984 "The attribute command is not supported by the MFW\n");
3985 return ECORE_NOTIMPL;
3986 } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3988 "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
3989 mb_params.mcp_resp, p_drv_attr->attr_cmd,
3990 p_drv_attr->attr_num);
3994 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3995 "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
3996 p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
3997 p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
3998 mb_params.mcp_param);
4000 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
4001 p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
4002 p_drv_attr->val = mb_params.mcp_param;
4004 return ECORE_SUCCESS;
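/* WOL register writes below go through the WRITE_WOL_REG mailbox command:
 * the register offset is passed in the param and the value is carried in
 * the mailbox union data.
 */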
4007 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4008 u32 offset, u32 val)
4010 struct ecore_mcp_mb_params mb_params = {0};
4011 enum _ecore_status_t rc = ECORE_SUCCESS;
4014 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4015 mb_params.param = offset;
4016 mb_params.p_data_src = &dword;
4017 mb_params.data_src_size = sizeof(dword);
4019 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4020 if (rc != ECORE_SUCCESS) {
4021 DP_NOTICE(p_hwfn, false,
4022 "Failed to wol write request, rc = %d\n", rc);
4025 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
4026 DP_NOTICE(p_hwfn, false,
4027 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4028 val, offset, mb_params.mcp_resp);
4029 rc = ECORE_UNKNOWN_ERROR;