/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))
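
/* Illustration (not from the original source): with the macros above, a
 * driver mailbox field access expands into a SHMEM read/write at the per-PF
 * mailbox address plus the field offset. For example,
 *
 *	fw_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
 *
 * is equivalent to
 *
 *	fw_resp = ecore_rd(p_hwfn, p_ptt,
 *			   p_hwfn->mcp_info->drv_mb_addr +
 *			   OFFSETOF(struct public_drv_mb, fw_mb_header));
 */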
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17

static int loaded_port[MAX_NUM_PORTS] = { 0 };

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);

	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))

	if (!p_hwfn->mcp_info->public_base)

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			OSAL_BE32_TO_CPU(tmp);
struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
				   struct ecore_mcp_cmd_elem *p_cmd_elem)
	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	return ECORE_SUCCESS;
/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES 20
#define ECORE_MCP_SHMEM_RDY_ITER_MS 50

static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* The driver can notify that there was an MCP reset, and read the
	 * SHMEM values before the MFW has completed initializing them.
	 * As a temporary solution, the "sup_msgs" field is used as a data
	 * ready indication.
	 * This should be replaced with an actual indication once the MFW
	 * provides one.
	 */
	while (!p_info->mfw_mb_length && cnt--) {
		p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
						      p_info->mfw_mb_addr);

		DP_NOTICE(p_hwfn, false,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return ECORE_TIMEOUT;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
	struct ecore_mcp_info *p_info;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
		OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)

	return ECORE_SUCCESS;

	DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use the MCP history register to check if an MCP reset occurred
	 * between init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	/* Wait for MFW response */
	do {
		/* Give the MFW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
		DP_ERR(p_hwfn, "Failed to reset MCP\n");

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at any given time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
		return !p_cmd_elem->b_is_completed;
/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
			  "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		return ECORE_UNKNOWN_ERROR;

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
	union drv_union_data union_data;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 delay)
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
		else if (rc != ECORE_AGAIN)

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
		else if (rc != ECORE_AGAIN)

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_MFW_CMD_PREEMPT(p_hwfn);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
			  "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
			  p_mb_params->data_src_size, p_mb_params->data_dst_size,

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
		return ECORE_SUCCESS;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
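
/* Usage sketch (illustrative only; mirrors ecore_mcp_cancel_load_req()
 * further below, and assumes the caller holds a valid PTT window):
 *
 *	u32 resp = 0, param = 0;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
 *			   &resp, &param);
 *	if (rc != ECORE_SUCCESS)
 *		DP_NOTICE(p_hwfn, false, "Mailbox command failed\n");
 */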
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 i_txn_size, u32 *i_buf)
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *o_txn_size, u32 *o_buf)
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

	return ECORE_SUCCESS;
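
/* Usage sketch (illustrative only; the parameter order of the elided
 * prototype is assumed here): the MFW reports the actual transaction size
 * through the mailbox param, so the caller passes a buffer of
 * MCP_DRV_NVM_BUF_LEN bytes and then consumes only *o_txn_size bytes:
 *
 *	u32 resp, param, txn_size;
 *	u32 buf[MCP_DRV_NVM_BUF_LEN / sizeof(u32)];
 *
 *	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, cmd, nvm_offset,
 *				  &resp, &param, &txn_size, buf);
 */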
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always report an engine load */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;

	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);

	return can_force_load;
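
/* Summary of the default force-load policy above: unless overridden, a force
 * load is permitted only when an OS driver replaces a preboot driver, or when
 * a kdump driver replaces an OS driver.
 */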
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);
#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;

	return config_bitmap;
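
/* Worked example (illustrative): building with only CONFIG_ECORE_L2 and
 * CONFIG_ECORE_SRIOV defined yields config_bitmap = 0x1 | 0x2 = 0x3. The
 * bitmap is reported to the MFW via drv_ver_1 of the load request (see
 * ecore_mcp_load_req() below).
 */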
struct ecore_load_req_in_params {
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	bool avoid_eng_reset;

struct ecore_load_req_out_params {
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;

	return ECORE_SUCCESS;
static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
		if (rc != ECORE_SUCCESS)
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
			if (rc != ECORE_SUCCESS)
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");

		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn, false,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
	u32 wol_param, mcp_resp, mcp_param;

	wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
	struct ecore_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   OFFSETOF(struct public_path,

		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);
static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");

	OSAL_TRANSCEIVER_UPDATE(p_hwfn);
static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
	struct ecore_mcp_link_state *p_link;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	status = ecore_rd(p_hwfn, p_ptt,
			  p_hwfn->mcp_info->port_addr +
			  OFFSETOF(struct public_port, link_status));
	DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
		   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
		   status, (u32)(p_hwfn->mcp_info->port_addr +
				 OFFSETOF(struct public_port,

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Resetting link indications\n");

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as a per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	} else {
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;

	/* We never store the total line speed, since p_link->speed is
	 * later changed according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
	default:
		p_link->partner_adv_pause = 0;

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by ecore, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return ECORE_SUCCESS;
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 OFFSETOF(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
	struct ecore_dev *p_dev = p_hwfn->p_dev;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and until its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery"
			  " process is already in progress\n");

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
	default:
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Invalid protocol type %d\n", type);

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and correct the value to min `1' and max `100'
	 * if the limit isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
				 p_hwfn->hw_info.ovlan);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
				 p_hwfn->hw_info.ovlan);
		} else {
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);

			/* Configure DB to add external vlan to EDPM packets */
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);

		ecore_sp_pf_update_stag(p_hwfn);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
	/* A single notification should be sent to the upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card"
		  " and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
struct ecore_mdump_cmd_params {

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
			"The mdump command is not supported by the MFW\n");

static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
	mdump_cmd_params.p_data_src = &epoch;
	mdump_cmd_params.data_src_size = sizeof(epoch);

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);

enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct mdump_config_stc *p_mdump_config)
	struct ecore_mdump_cmd_params mdump_cmd_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
	mdump_cmd_params.p_data_dst = p_mdump_config;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mdump_info *p_mdump_info)
	u32 addr, global_offsize, global_addr;
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
					OFFSETOF(struct public_global,

	if (p_mdump_info->reason) {
		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
		if (rc != ECORE_SUCCESS)

		p_mdump_info->version = mdump_config.version;
		p_mdump_info->config = mdump_config.config;
		p_mdump_info->epoch = mdump_config.epoc;
		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
		p_mdump_info->valid_logs = mdump_config.valid_logs;

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
			   p_mdump_info->reason, p_mdump_info->version,
			   p_mdump_info->config, p_mdump_info->epoch,
			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d\n", p_mdump_info->reason);

	return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);

enum _ecore_status_t
ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mdump_retain_data *p_mdump_retain)
	struct ecore_mdump_cmd_params mdump_cmd_params;
	struct mdump_retain_data_stc mfw_mdump_retain;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
	mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
	mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc != ECORE_SUCCESS)

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		return ECORE_UNKNOWN_ERROR;

	p_mdump_retain->valid = mfw_mdump_retain.valid;
	p_mdump_retain->epoch = mfw_mdump_retain.epoch;
	p_mdump_retain->pf = mfw_mdump_retain.pf;
	p_mdump_retain->status = mfw_mdump_retain.status;

	return ECORE_SUCCESS;

enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
	struct ecore_mdump_cmd_params mdump_cmd_params;

	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
	struct ecore_mdump_retain_data mdump_retain;
	enum _ecore_status_t rc;

	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))

	rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
	if (rc == ECORE_SUCCESS && mdump_retain.valid) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
			  mdump_retain.epoch, mdump_retain.pf,
			  mdump_retain.status);
	} else {
		DP_NOTICE(p_hwfn, false,
			  "The MFW notified that a critical error occurred in the device\n");

	if (p_hwfn->p_dev->allow_mdump) {
		DP_NOTICE(p_hwfn, false,
			  "Not acknowledging the notification to allow the MFW crash dump\n");

	DP_NOTICE(p_hwfn, false,
		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
void
ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 port_cfg, val;

	if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
		return;

	OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
	port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			    OFFSETOF(struct public_port, oem_cfg_port));
	val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
		DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
			  val);

	val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
	if (val == OEM_CFG_SCHED_TYPE_ETS)
		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
	else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
	else
		DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
			  val);

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));
	val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
	p_hwfn->ufp_info.tc = (u8)val;
	val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
			    OEM_CFG_FUNC_HOST_PRI_CTRL);
	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
	else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
	else
		DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
			  val);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
		   p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
		   p_hwfn->ufp_info.pri_type);
}
static enum _ecore_status_t
ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	ecore_mcp_read_ufp_config(p_hwfn, p_ptt);

	if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;

		ecore_qm_reconf(p_hwfn, p_ptt);
	} else if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_ETS) {
		/* Merge UFP TC with the dcbx TC data */
		ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
					    ECORE_DCBX_OPERATIONAL_MIB);
	}

	/* update storm FW with negotiation results */
	ecore_sp_pf_update_ufp(p_hwfn);

	return ECORE_SUCCESS;
}
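/* Dispatch MFW -> driver notifications. Each entry of the MFW mailbox is
 * compared against a driver-side shadow copy; any entry that changed carries
 * a new message whose index identifies the event type. After handling, every
 * message is acknowledged back to the MFW (in big-endian) and the shadow is
 * refreshed.
 */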
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			/* clear the user-config cache */
			OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
				    sizeof(struct ecore_dcbx_set));
			break;
		case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
			ecore_lldp_mib_update_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_OEM_CFG_UPDATE:
			ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			ecore_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects the answer in BE, so force a BE write */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
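/* MFW/bundle version helpers: a VF reads the version cached from its ACQUIRE
 * response, while a PF reads it from the public_global shmem section.
 */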
enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   u32 *p_mfw_ver,
					   u32 *p_running_bundle_id)
{
	u32 global_offsize;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
		return ECORE_SUCCESS;
	}
#endif

	if (IS_VF(p_hwfn->p_dev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return ECORE_SUCCESS;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF requested MFW version prior to ACQUIRE\n");
		return ECORE_INVAL;
	}

	global_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
						       public_base,
						       PUBLIC_GLOBAL));
	*p_mfw_ver =
	    ecore_rd(p_hwfn, p_ptt,
		     SECTION_ADDR(global_offsize,
				  0) + OFFSETOF(struct public_global, mfw_ver));

	if (p_running_bundle_id != OSAL_NULL) {
		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
						SECTION_ADDR(global_offsize,
							     0) +
						OFFSETOF(struct public_global,
							 running_bundle_id));
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_media_type)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (!p_ptt) {
		*p_media_type = MEDIA_UNSPECIFIED;
		rc = ECORE_INVAL;
	} else {
		*p_media_type = ecore_rd(p_hwfn, p_ptt,
					 p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  media_type));
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    u32 *p_transceiver_state,
						    u32 *p_transceiver_type)
{
	u32 transceiver_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;

	transceiver_info = ecore_rd(p_hwfn, p_ptt,
				    p_hwfn->mcp_info->port_addr +
				    offsetof(struct public_port,
					     transceiver_data));

	*p_transceiver_state = GET_MFW_FIELD(transceiver_info,
					     ETH_TRANSCEIVER_STATE);

	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) {
		*p_transceiver_type = GET_MFW_FIELD(transceiver_info,
						    ETH_TRANSCEIVER_TYPE);
	} else {
		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
	}

	return rc;
}
static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
{
	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
		return 1;

	return 0;
}
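/* Derive the advertised speed capability mask from the plugged transceiver
 * type. Multi-rate modules map to the union of the rates they support;
 * unknown modules fall back to 0xff (all speeds) so the link is not blocked.
 */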
enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 *p_speed_mask)
{
	u32 transceiver_type, transceiver_state;

	ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
				       &transceiver_type);

	if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
		return ECORE_INVAL;

	switch (transceiver_type) {
	case ETH_TRANSCEIVER_TYPE_1G_LX:
	case ETH_TRANSCEIVER_TYPE_1G_SX:
	case ETH_TRANSCEIVER_TYPE_1G_PCC:
	case ETH_TRANSCEIVER_TYPE_1G_ACC:
	case ETH_TRANSCEIVER_TYPE_1000BASET:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_10G_SR:
	case ETH_TRANSCEIVER_TYPE_10G_LR:
	case ETH_TRANSCEIVER_TYPE_10G_LRM:
	case ETH_TRANSCEIVER_TYPE_10G_ER:
	case ETH_TRANSCEIVER_TYPE_10G_PCC:
	case ETH_TRANSCEIVER_TYPE_10G_ACC:
	case ETH_TRANSCEIVER_TYPE_4x10G:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;

	case ETH_TRANSCEIVER_TYPE_40G_LR4:
	case ETH_TRANSCEIVER_TYPE_40G_SR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;

	case ETH_TRANSCEIVER_TYPE_100G_AOC:
	case ETH_TRANSCEIVER_TYPE_100G_SR4:
	case ETH_TRANSCEIVER_TYPE_100G_LR4:
	case ETH_TRANSCEIVER_TYPE_100G_ER4:
	case ETH_TRANSCEIVER_TYPE_100G_ACC:
		*p_speed_mask =
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;

	case ETH_TRANSCEIVER_TYPE_25G_SR:
	case ETH_TRANSCEIVER_TYPE_25G_LR:
	case ETH_TRANSCEIVER_TYPE_25G_AOC:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;

	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_40G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_100G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
		*p_speed_mask =
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
		*p_speed_mask =
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;

	case ETH_TRANSCEIVER_TYPE_XLPPI:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		break;

	case ETH_TRANSCEIVER_TYPE_10G_BASET:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;

	default:
		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
			transceiver_type);
		*p_speed_mask = 0xff;
		break;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 *p_board_config)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (!p_ptt) {
		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
		rc = ECORE_INVAL;
	} else {
		nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
					MISC_REG_GEN_PURP_CR0);
		nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
					   nvm_cfg_addr + 4);
		port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
		*p_board_config = ecore_rd(p_hwfn, p_ptt,
					   port_cfg_addr +
					   offsetof(struct nvm_cfg1_port,
						    board_cfg));
	}

	return rc;
}
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
				 enum ecore_pci_personality *p_proto)
{
	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}
static enum _ecore_status_t
ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	/* The PMD supports only the Ethernet personality; the command is
	 * issued mainly to verify the MFW recognizes it (assumed
	 * reconstruction of an elided query).
	 */
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL,
			   0, &resp, &param);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
			  struct public_func *p_info,
			  struct ecore_ptt *p_ptt,
			  enum ecore_pci_personality *p_proto)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
		    ECORE_SUCCESS)
			ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	default:
		rc = ECORE_INVAL;
	}

	return rc;
}
enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *info;
	struct public_func shmem_info;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				      &info->protocol) != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return ECORE_INVAL;
	}

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		/* TODO - are there protocols for which there's no MAC? */
		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
	}

	/* TODO - are these calculations true for BE machine? */
	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx node %lx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   (unsigned long)info->wwn_port,
		   (unsigned long)info->wwn_node, info->ovlan);

	return ECORE_SUCCESS;
}
struct ecore_mcp_link_params
*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct ecore_mcp_link_state
*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
		p_hwfn->mcp_info->link_output.link_up = true;
	}
#endif

	return &p_hwfn->mcp_info->link_output;
}

struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}
enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	OSAL_MSLEEP(1020);

	return rc;
}

const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->func_info;
}
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u32 personalities)
{
	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
	struct public_func shmem_info;
	int i, count = 0, num_pfs;

	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);

	for (i = 0; i < num_pfs; i++) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));
		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
					      &protocol) != ECORE_SUCCESS)
			continue;

		if ((1 << ((u32)protocol)) & personalities)
			count++;
	}

	return count;
}
enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_flash_size)
{
	u32 flash_size;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
		return ECORE_INVAL;
	}
#endif

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));

	*p_flash_size = flash_size;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Avoid triggering a recovery since such a process is already in progress\n");
		return ECORE_AGAIN;
	}

	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	enum _ecore_status_t rc;

	/* Only the leader can configure MSIX, and it needs to take CMT into
	 * account.
	 */
	if (!IS_LEAD_HWFN(p_hwfn))
		return ECORE_SUCCESS;
	num *= p_hwfn->p_dev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			   &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
			  vf_id);
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

static enum _ecore_status_t
ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			   param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n",
			   num);
	}

	return rc;
}
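/* BB devices configure MSI-X per VF (and must account for CMT), while AH
 * devices configure a single number of interrupts for all VFs of the PF.
 */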
enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 vf_id, u8 num)
{
	if (ECORE_IS_BB(p_hwfn->p_dev))
		return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	struct ecore_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	u32 num_words, i;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
	drv_version.version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		/* The driver name is expected to be in a big-endian format */
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
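/* Halting the MCP is done in two steps: a HALT mailbox command followed by
 * polling MCP_REG_CPU_STATE until the soft-halt bit is set. While halted,
 * mailbox commands are sent in blocking mode. Resume reverses the sequence
 * by clearing the soft-halt bit in MCP_REG_CPU_MODE.
 */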
/* A maximum of 100 msec waiting time for the MCP to halt */
#define ECORE_MCP_HALT_SLEEP_MS		10
#define ECORE_MCP_HALT_MAX_RETRIES	10

enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
		cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);

	if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return ECORE_BUSY;
	}

	ecore_mcp_cmd_set_blocking(p_hwfn, true);

	return ECORE_SUCCESS;
}

#define ECORE_MCP_RESUME_SLEEP_MS	10

enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);

	OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return ECORE_BUSY;
	}

	ecore_mcp_cmd_set_blocking(p_hwfn, false);

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   enum ecore_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	enum _ecore_status_t rc;

	switch (client) {
	case ECORE_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case ECORE_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case ECORE_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	enum _ecore_status_t rc;

	switch (drv_state) {
	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case ECORE_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case ECORE_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u16 mtu)
{
	u32 resp = 0, param = 0, drv_mb_param = 0;
	enum _ecore_status_t rc;

	SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu);
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u8 *mac)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE,
		      DRV_MSG_CODE_VMAC_TYPE_MAC);
	mb_params.param |= MCP_PF_ID(p_hwfn);
	OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN);
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	return rc;
}
enum _ecore_status_t
ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			    enum ecore_ov_eswitch eswitch)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (eswitch) {
	case ECORE_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case ECORE_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case ECORE_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       enum ecore_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	enum _ecore_status_t rc;

	switch (mode) {
	case ECORE_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case ECORE_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case ECORE_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 mask_parities)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			   mask_parities, &resp, &param);

	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not ack mask parity request. Old MFW?\n");
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}
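/* NVM access helpers. The flash is read and written through the mailbox in
 * chunks of at most MCP_DRV_NVM_BUF_LEN bytes; the chunk length is encoded
 * in the upper bits of the nvm_offset parameter alongside the flash address.
 */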
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	u32 nvm_offset, resp, param;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	bytes_left = len;
	offset = 0;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		nvm_offset = (addr + offset) | (bytes_to_copy <<
						DRV_MB_PARAM_NVM_LEN_OFFSET);
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_NVM_READ_NVRAM,
					  nvm_offset, &resp, &param, &buf_size,
					  (u32 *)(p_buf + offset));
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
				  rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_NVM_OK) {
			DP_NOTICE(p_dev, false,
				  "nvm read failed, resp = 0x%08x\n", resp);
			rc = ECORE_UNKNOWN_ERROR;
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		offset += buf_size;
		bytes_left -= buf_size;
	}

	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
					u32 addr, u8 *p_buf, u32 *p_len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp = 0, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				  (cmd == ECORE_PHY_CORE_READ) ?
				  DRV_MSG_CODE_PHY_CORE_READ :
				  DRV_MSG_CODE_PHY_RAW_READ,
				  addr, &resp, &param, p_len, (u32 *)p_buf);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp = 0, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
						  u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp = 0, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
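/* Writes mirror the chunked read flow above: each iteration sends up to
 * MCP_DRV_NVM_BUF_LEN bytes, with the chunk size and flash address packed
 * into nvm_offset, and the loop bails out on the first mailbox or NVM error.
 */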
/* rc receives ECORE_INVAL as a default value because the loop below might
 * not be entered at all if len is 0.
 */
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	switch (cmd) {
	case ECORE_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case ECORE_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	case ECORE_EXT_PHY_FW_UPGRADE:
		nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
			  cmd);
		rc = ECORE_INVAL;
		goto out;
	}

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MCP_DRV_NVM_BUF_LEN);
		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
			      addr) + buf_idx;
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					  &resp, &param, buf_size,
					  (u32 *)&p_buf[buf_idx]);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "ecore_mcp_nvm_write() failed, rc = %d\n",
				  rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(p_dev, false,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = ECORE_UNKNOWN_ERROR;
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (buf_idx % 0x1000 >
		    (buf_idx + buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		buf_idx += buf_size;
	}

	p_dev->mcp_nvm_resp = resp;
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 resp = 0, param, nvm_cmd;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
		  DRV_MSG_CODE_PHY_RAW_WRITE;
	rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
				  &resp, &param, len, (u32 *)p_buf);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
						   u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt;
	u32 resp = 0, param;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
			   &resp, &param);
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
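/* SFP/transceiver EEPROM access goes over the module's I2C bus via the MFW.
 * Transfers are split into MAX_I2C_TRANSACTION_SIZE chunks, with the port,
 * I2C address, byte offset and chunk size all packed into nvm_offset.
 */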
enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
	u32 resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
		     (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
	addr = offset;
	offset = 0;
	bytes_left = len;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((addr + offset) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
		nvm_offset |= (bytes_to_copy <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_TRANSCEIVER_READ,
					  nvm_offset, &resp, &param, &buf_size,
					  (u32 *)(p_buf + offset));
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return ECORE_NODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		offset += buf_size;
		bytes_left -= buf_size;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 port, u32 addr, u32 offset,
					     u32 len, u8 *p_buf)
{
	u32 buf_idx, buf_size, nvm_offset, resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
		     (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((offset + buf_idx) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
		nvm_offset |= (buf_size <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_TRANSCEIVER_WRITE,
					  nvm_offset, &resp, &param, buf_size,
					  (u32 *)&p_buf[buf_idx]);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to send a transceiver write command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return ECORE_NODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		buf_idx += buf_size;
	}

	return ECORE_SUCCESS;
}
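/* GPIO accessors: the GPIO number (and value, for writes) is packed into the
 * mailbox param, and the MFW responds with FW_MSG_CODE_GPIO_OK on success.
 */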
enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
			   drv_mb_param, &rsp, gpio_val);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u16 gpio, u16 gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, param, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
		       (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_direction,
					 u32 *gpio_ctrl)
{
	u32 drv_mb_param = 0, rsp, val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
			   drv_mb_param, &rsp, &val);
	if (rc != ECORE_SUCCESS)
		return rc;

	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
			  DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
		     DRV_MB_PARAM_GPIO_CTRL_OFFSET;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, num_images);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	struct bist_nvm_image_att *p_image_att, u32 image_index)
{
	u32 buf_size, nvm_offset, resp, param;
	enum _ecore_status_t rc;

	nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		      DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
	nvm_offset |= (image_index <<
		       DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
				  nvm_offset, &resp, &param, &buf_size,
				  (u32 *)p_image_att);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &mfw_temp_info;
	mb_params.data_dst_size = sizeof(mfw_temp_info);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = mfw_temp_info.sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_OFFSET;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_OFFSET;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_OFFSET;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_OFFSET;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	u32 buf_size, resp, param;
	enum _ecore_status_t rc;

	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
				  0, &resp, &param, &buf_size,
				  &p_mba_vers->mba_vers[0]);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}

enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}
static enum resource_id_enum
ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case ECORE_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case ECORE_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case ECORE_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case ECORE_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case ECORE_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case ECORE_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case ECORE_MAC:
	case ECORE_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case ECORE_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case ECORE_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case ECORE_RDMA_CNQ_RAM:
	case ECORE_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case ECORE_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case ECORE_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define ECORE_RESC_ALLOC_VERSION_MAJOR	2
#define ECORE_RESC_ALLOC_VERSION_MINOR	0
#define ECORE_RESC_ALLOC_VERSION				\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))

struct ecore_resc_alloc_in_params {
	u32 cmd;
	enum ecore_resources res_id;
	u32 resc_max_val;
};

struct ecore_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};
#define ECORE_RECOVERY_PROLOG_SLEEP_MS	100

enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
	enum _ecore_status_t rc;

	/* Allow ongoing PCIe transactions to complete */
	OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);

	/* Clear the PF's internal FID_enable in the PXP */
	rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
			  rc);

	return rc;
}
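/* Resource allocation negotiation with the MFW: the same mailbox message is
 * used both to set a driver maximum (SET_RESOURCE_VALUE) and to query the
 * allocation (GET_RESOURCE_ALLOC); the resource_info struct is passed as the
 * data source and overwritten in place with the MFW's answer.
 */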
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct ecore_resc_alloc_in_params *p_in_params,
			      struct ecore_resc_alloc_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       ecore_hw_get_resc_name(p_in_params->res_id));
		return ECORE_INVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd, p_in_params->res_id,
		   ecore_hw_get_resc_name(p_in_params->res_id),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num, p_out_params->resc_start,
		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
		   p_out_params->flags);

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   enum ecore_resources res_id, u32 resc_max_val,
			   u32 *p_mcp_resp)
{
	struct ecore_resc_alloc_out_params out_params;
	struct ecore_resc_alloc_in_params in_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					   &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			enum ecore_resources res_id, u32 *p_mcp_resp,
			u32 *p_resc_num, u32 *p_resc_start)
{
	struct ecore_resc_alloc_out_params out_params;
	struct ecore_resc_alloc_in_params in_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					   &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			     &mcp_resp, &mcp_param);
}
static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 param, u32 *p_mcp_resp,
						   u32 *p_mcp_param)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			   p_mcp_resp, p_mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn, false,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return ECORE_INVAL;
	}

	return rc;
}
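/* Global resource locking via the MFW. A single lock attempt is issued by
 * __ecore_mcp_resc_lock(); ecore_mcp_resc_lock() wraps it with the retry
 * policy carried in the params struct. An illustrative, hypothetical caller
 * would look roughly like this (the resource id below is only an example;
 * any enum ecore_resc_lock value may be used):
 *
 *	struct ecore_resc_lock_params lock;
 *	struct ecore_resc_unlock_params unlock;
 *
 *	ecore_mcp_resc_lock_default_init(&lock, &unlock, resource, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock) == ECORE_SUCCESS &&
 *	    lock.b_granted) {
 *		... critical section ...
 *		ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */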
enum _ecore_status_t
__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	switch (p_params->timeout) {
	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case ECORE_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    struct ecore_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	enum _ecore_status_t rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
					DIV_ROUND_UP(p_params->retry_interval,
						     1000);

				OSAL_MSLEEP(retry_interval_in_ms);
			} else {
				OSAL_UDELAY(p_params->retry_interval);
			}
		}

		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return ECORE_SUCCESS;
}

void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
				      struct ecore_resc_unlock_params *p_unlock,
				      enum ecore_resc_lock resource,
				      bool b_is_permanent)
{
	if (p_lock != OSAL_NULL) {
		OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since it's
		 * unexpected another entity would release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
				ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock != OSAL_NULL) {
		OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}
enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
}

enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			   0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (rc == ECORE_SUCCESS)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			     features, &mcp_resp, &mcp_param);
}
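/* Generic attribute access: the attribute key and command (read / write /
 * read-clear / clear) are packed into the mailbox param; writes also carry
 * an attribute_cmd_write_stc payload, and reads return the value in
 * mcp_param.
 */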
enum _ecore_status_t
ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                        struct ecore_mcp_drv_attr *p_drv_attr)
{
        struct attribute_cmd_write_stc attr_cmd_write;
        enum _attribute_commands_e mfw_attr_cmd;
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

        switch (p_drv_attr->attr_cmd) {
        case ECORE_MCP_DRV_ATTR_CMD_READ:
                mfw_attr_cmd = ATTRIBUTE_CMD_READ;
                break;
        case ECORE_MCP_DRV_ATTR_CMD_WRITE:
                mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
                break;
        case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
                mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
                break;
        case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
                mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
                break;
        default:
                DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
                          p_drv_attr->attr_cmd);
                return ECORE_INVAL;
        }

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
        SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
                      p_drv_attr->attr_num);
        SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
                      mfw_attr_cmd);
        if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
                OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
                attr_cmd_write.val = p_drv_attr->val;
                attr_cmd_write.mask = p_drv_attr->mask;
                attr_cmd_write.offset = p_drv_attr->offset;

                mb_params.p_data_src = &attr_cmd_write;
                mb_params.data_src_size = sizeof(attr_cmd_write);
        }

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
                DP_INFO(p_hwfn,
                        "The attribute command is not supported by the MFW\n");
                return ECORE_NOTIMPL;
        } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
                DP_INFO(p_hwfn,
                        "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
                        mb_params.mcp_resp, p_drv_attr->attr_cmd,
                        p_drv_attr->attr_num);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
                   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
                   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
                   mb_params.mcp_param);

        if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
            p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
                p_drv_attr->val = mb_params.mcp_param;

        return ECORE_SUCCESS;
}
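
/* Usage sketch (illustrative only): reading an attribute value through the
 * MFW mailbox. attr_num 0 is a placeholder key, not a real attribute.
 *
 *	struct ecore_mcp_drv_attr drv_attr;
 *
 *	OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
 *	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
 *	drv_attr.attr_num = 0;	// placeholder attribute key
 *	if (ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr) == ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "attribute value 0x%08x\n",
 *			   drv_attr.val);
 */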
enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        struct ecore_mcp_mb_params mb_params;
        u8 fir_valid, l2_valid;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
                DP_INFO(p_hwfn,
                        "The get_engine_config command is unsupported by the MFW\n");
                return ECORE_NOTIMPL;
        }

        fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
                                  FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
        if (fir_valid)
                p_dev->fir_affin =
                        GET_MFW_FIELD(mb_params.mcp_param,
                                      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);

        l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
                                 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
        if (l2_valid)
                p_dev->l2_affin_hint =
                        GET_MFW_FIELD(mb_params.mcp_param,
                                      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);

        DP_INFO(p_hwfn,
                "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
                fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);

        return ECORE_SUCCESS;
}
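
/* Note: the valid bits returned by get_engine_config gate each update above,
 * so p_dev->fir_affin and p_dev->l2_affin_hint keep their previous values
 * whenever the MFW marks the corresponding field as invalid.
 */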
enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
                DP_INFO(p_hwfn,
                        "The get_ppfid_bitmap command is unsupported by the MFW\n");
                return ECORE_NOTIMPL;
        }

        p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
                                            FW_MB_PARAM_PPFID_BITMAP);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
                   p_dev->ppfid_bitmap);

        return ECORE_SUCCESS;
}
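
/* Note: ecore_mcp_wol_wr() below tunnels a single 32-bit write into the WOL
 * register block through the WRITE_WOL_REG mailbox command; presumably it is
 * invoked from the driver's wake-on-LAN configuration paths (an assumption,
 * as the callers are not shown here).
 */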
void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                      u32 offset, u32 val)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 dword = val;
        struct ecore_mcp_mb_params mb_params;

        OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
        mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
        mb_params.param = offset;
        mb_params.p_data_src = &dword;
        mb_params.data_src_size = sizeof(dword);

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to send WOL write request, rc = %d\n", rc);
        }

        if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
                          val, offset, mb_params.mcp_resp);
                rc = ECORE_UNKNOWN_ERROR;