2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23 #include "ecore_sp_commands.h"
25 #define CHIP_MCP_RESP_ITER_US 10
26 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
28 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
29 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
31 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
32 ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
35 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
36 ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
38 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
39 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
40 OFFSETOF(struct public_drv_mb, _field), _val)
42 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
43 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
44 OFFSETOF(struct public_drv_mb, _field))
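/* As a minimal illustration (the field names below appear later in this
 * file): DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param) and
 * DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header) each resolve to an ecore_wr()/
 * ecore_rd() at mcp_info->drv_mb_addr + OFFSETOF(struct public_drv_mb,
 * <field>), i.e. an access to this function's slot of the driver mailbox
 * in shmem.
 */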
46 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
47 DRV_ID_PDA_COMP_VER_OFFSET)
49 #define MCP_BYTES_PER_MBIT_OFFSET 17
53 static int loaded_port[MAX_NUM_PORTS] = { 0 };
56 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
58 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
63 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
65 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
67 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
69 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
71 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
72 "port_addr = 0x%x, port_id 0x%02x\n",
73 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
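/* A note on the shmem addressing used here and throughout this file:
 * SECTION_OFFSIZE_ADDR(public_base, SECTION) yields the address of the
 * section's "offsize" dword, which encodes both the section offset and its
 * size (see the SECTION_SIZE() use in ecore_mcp_get_shmem_func()), and
 * SECTION_ADDR() turns that offsize plus an index (port id, PF id, path id)
 * into the absolute address of the relevant entry.
 */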
76 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
78 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
83 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
87 if (!p_hwfn->mcp_info->public_base)
90 for (i = 0; i < length; i++) {
91 tmp = ecore_rd(p_hwfn, p_ptt,
92 p_hwfn->mcp_info->mfw_mb_addr +
93 (i << 2) + sizeof(u32));
95 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
96 OSAL_BE32_TO_CPU(tmp);
100 struct ecore_mcp_cmd_elem {
101 osal_list_entry_t list;
102 struct ecore_mcp_mb_params *p_mb_params;
u16 expected_seq_num;
bool b_is_completed;
};
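/* Each in-flight mailbox command is represented by such an element on
 * mcp_info->cmd_list: the sender records the sequence number it used, and
 * ecore_mcp_update_pending_cmd() later matches the FW_MSG_SEQ_NUMBER echoed
 * in fw_mb_header against expected_seq_num before marking it completed.
 */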
107 /* Must be called while cmd_lock is acquired */
108 static struct ecore_mcp_cmd_elem *
109 ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
110 struct ecore_mcp_mb_params *p_mb_params,
111 u16 expected_seq_num)
113 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
115 p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
116 sizeof(*p_cmd_elem));
118 DP_NOTICE(p_hwfn, false,
119 "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
123 p_cmd_elem->p_mb_params = p_mb_params;
124 p_cmd_elem->expected_seq_num = expected_seq_num;
125 OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
130 /* Must be called while cmd_lock is acquired */
131 static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
132 struct ecore_mcp_cmd_elem *p_cmd_elem)
134 OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
135 OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
138 /* Must be called while cmd_lock is acquired */
139 static struct ecore_mcp_cmd_elem *
140 ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
142 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
144 OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
145 struct ecore_mcp_cmd_elem) {
146 if (p_cmd_elem->expected_seq_num == seq_num)
153 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
155 if (p_hwfn->mcp_info) {
156 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
158 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
159 OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
160 &p_hwfn->mcp_info->cmd_list, list,
161 struct ecore_mcp_cmd_elem) {
162 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
164 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
166 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
167 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
168 #ifdef CONFIG_ECORE_LOCK_ALLOC
169 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
170 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
174 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
176 return ECORE_SUCCESS;
179 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
180 struct ecore_ptt *p_ptt)
182 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
183 u32 drv_mb_offsize, mfw_mb_offsize;
184 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
187 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
188 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
189 p_info->public_base = 0;
194 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
195 if (!p_info->public_base)
198 p_info->public_base |= GRCBASE_MCP;
200 /* Calculate the driver and MFW mailbox address */
201 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
202 SECTION_OFFSIZE_ADDR(p_info->public_base,
204 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
210 /* Set the MFW MB address */
211 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
212 SECTION_OFFSIZE_ADDR(p_info->public_base,
214 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
215 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
216 p_info->mfw_mb_addr);
/* Get the current driver mailbox sequence before sending
 * the first command.
 */
221 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
222 DRV_MSG_SEQ_NUMBER_MASK;
224 /* Get current FW pulse sequence */
225 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
228 p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
230 return ECORE_SUCCESS;
233 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
234 struct ecore_ptt *p_ptt)
236 struct ecore_mcp_info *p_info;
239 /* Allocate mcp_info structure */
240 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
241 sizeof(*p_hwfn->mcp_info));
242 if (!p_hwfn->mcp_info)
244 p_info = p_hwfn->mcp_info;
246 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
247 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
/* Do not free mcp_info here, since public_base indicates that
 * the MCP is not initialized.
 */
251 return ECORE_SUCCESS;
254 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
255 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
256 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
257 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
260 /* Initialize the MFW spinlocks */
261 #ifdef CONFIG_ECORE_LOCK_ALLOC
262 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
263 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
265 OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
266 OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
268 OSAL_LIST_INIT(&p_info->cmd_list);
270 return ECORE_SUCCESS;
273 DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
274 ecore_mcp_free(p_hwfn);
278 static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
279 struct ecore_ptt *p_ptt)
281 u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
283 /* Use MCP history register to check if MCP reset occurred between init
286 if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
287 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
288 "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
289 p_hwfn->mcp_info->mcp_hist, generic_por_0);
291 ecore_load_mcp_offsets(p_hwfn, p_ptt);
292 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
296 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
297 struct ecore_ptt *p_ptt)
299 u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
300 enum _ecore_status_t rc = ECORE_SUCCESS;
303 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
304 delay = EMUL_MCP_RESP_ITER_US;
307 if (p_hwfn->mcp_info->b_block_cmd) {
308 DP_NOTICE(p_hwfn, false,
309 "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
310 return ECORE_ABORTED;
313 /* Ensure that only a single thread is accessing the mailbox */
314 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
316 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
318 /* Set drv command along with the updated sequence */
319 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
320 seq = ++p_hwfn->mcp_info->drv_mb_seq;
321 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
324 /* Wait for MFW response */
/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
327 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
328 MISCS_REG_GENERIC_POR_0)) &&
329 (cnt++ < ECORE_MCP_RESET_RETRIES));
331 if (org_mcp_reset_seq !=
332 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
333 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
334 "MCP was reset after %d usec\n", cnt * delay);
336 DP_ERR(p_hwfn, "Failed to reset MCP\n");
340 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
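/* The check above relies on the MCP bumping MISCS_REG_GENERIC_POR_0 on every
 * reset; the same register is cached as mcp_info->mcp_hist and is what
 * ecore_mcp_reread_offsets() uses to notice that an MCP reset happened behind
 * the driver's back.
 */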
345 /* Must be called while cmd_lock is acquired */
346 static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
348 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
/* There is at most one pending command at any given time, and if it
 * exists, it is placed at the head of the list.
 */
353 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
354 p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
355 struct ecore_mcp_cmd_elem,
357 return !p_cmd_elem->b_is_completed;
363 /* Must be called while cmd_lock is acquired */
364 static enum _ecore_status_t
365 ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
367 struct ecore_mcp_mb_params *p_mb_params;
368 struct ecore_mcp_cmd_elem *p_cmd_elem;
372 mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
373 seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
375 /* Return if no new non-handled response has been received */
376 if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
379 p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
382 "Failed to find a pending mailbox cmd that expects sequence number %d\n",
384 return ECORE_UNKNOWN_ERROR;
387 p_mb_params = p_cmd_elem->p_mb_params;
389 /* Get the MFW response along with the sequence number */
390 p_mb_params->mcp_resp = mcp_resp;
392 /* Get the MFW param */
393 p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
395 /* Get the union data */
396 if (p_mb_params->p_data_dst != OSAL_NULL &&
397 p_mb_params->data_dst_size) {
398 u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
399 OFFSETOF(struct public_drv_mb,
401 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
402 union_data_addr, p_mb_params->data_dst_size);
405 p_cmd_elem->b_is_completed = true;
407 return ECORE_SUCCESS;
410 /* Must be called while cmd_lock is acquired */
411 static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
412 struct ecore_ptt *p_ptt,
413 struct ecore_mcp_mb_params *p_mb_params,
416 union drv_union_data union_data;
419 /* Set the union data */
420 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
421 OFFSETOF(struct public_drv_mb, union_data);
422 OSAL_MEM_ZERO(&union_data, sizeof(union_data));
423 if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
424 OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
425 p_mb_params->data_src_size);
426 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
429 /* Set the drv param */
430 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
432 /* Set the drv command along with the sequence number */
433 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
435 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
436 "MFW mailbox: command 0x%08x param 0x%08x\n",
437 (p_mb_params->cmd | seq_num), p_mb_params->param);
440 static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
443 p_hwfn->mcp_info->b_block_cmd = block_cmd;
445 DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
446 block_cmd ? "Block" : "Unblock");
449 static enum _ecore_status_t
450 _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
451 struct ecore_mcp_mb_params *p_mb_params,
452 u32 max_retries, u32 delay)
454 struct ecore_mcp_cmd_elem *p_cmd_elem;
457 enum _ecore_status_t rc = ECORE_SUCCESS;
459 /* Wait until the mailbox is non-occupied */
461 /* Exit the loop if there is no pending command, or if the
462 * pending command is completed during this iteration.
463 * The spinlock stays locked until the command is sent.
466 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
468 if (!ecore_mcp_has_pending_cmd(p_hwfn))
471 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
472 if (rc == ECORE_SUCCESS)
474 else if (rc != ECORE_AGAIN)
477 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
479 } while (++cnt < max_retries);
481 if (cnt >= max_retries) {
482 DP_NOTICE(p_hwfn, false,
483 "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
484 p_mb_params->cmd, p_mb_params->param);
488 /* Send the mailbox command */
489 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
490 seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
491 p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
497 __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
498 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
500 /* Wait for the MFW response */
502 /* Exit the loop if the command is already completed, or if the
503 * command is completed during this iteration.
504 * The spinlock stays locked until the list element is removed.
508 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
510 if (p_cmd_elem->b_is_completed)
513 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
514 if (rc == ECORE_SUCCESS)
516 else if (rc != ECORE_AGAIN)
519 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
520 } while (++cnt < max_retries);
522 if (cnt >= max_retries) {
523 DP_NOTICE(p_hwfn, false,
524 "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
525 p_mb_params->cmd, p_mb_params->param);
527 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
528 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
529 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
531 ecore_mcp_cmd_set_blocking(p_hwfn, true);
532 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
536 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
537 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
539 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
540 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
541 p_mb_params->mcp_resp, p_mb_params->mcp_param,
542 (cnt * delay) / 1000, (cnt * delay) % 1000);
544 /* Clear the sequence number from the MFW response */
545 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
547 return ECORE_SUCCESS;
550 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
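/* To recap the flow above: poll until any previously pending command
 * completes, then, still under cmd_lock, queue a new list element with a
 * fresh sequence number and write the command to the mailbox, and finally
 * poll until the MFW echoes that sequence number back - or the retry budget
 * runs out, in which case further mailbox commands are blocked and the
 * failure is reported via ecore_hw_err_notify().
 */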
554 static enum _ecore_status_t
555 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
556 struct ecore_ptt *p_ptt,
557 struct ecore_mcp_mb_params *p_mb_params)
559 osal_size_t union_data_size = sizeof(union drv_union_data);
560 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
561 u32 delay = CHIP_MCP_RESP_ITER_US;
564 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
565 delay = EMUL_MCP_RESP_ITER_US;
566 /* There is a built-in delay of 100usec in each MFW response read */
567 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
571 /* MCP not initialized */
572 if (!ecore_mcp_is_init(p_hwfn)) {
573 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
577 if (p_mb_params->data_src_size > union_data_size ||
578 p_mb_params->data_dst_size > union_data_size) {
580 "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
581 p_mb_params->data_src_size, p_mb_params->data_dst_size,
586 if (p_hwfn->mcp_info->b_block_cmd) {
587 DP_NOTICE(p_hwfn, false,
588 "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
589 p_mb_params->cmd, p_mb_params->param);
590 return ECORE_ABORTED;
593 return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
597 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
598 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
599 u32 *o_mcp_resp, u32 *o_mcp_param)
601 struct ecore_mcp_mb_params mb_params;
602 enum _ecore_status_t rc;
605 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
606 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
608 loaded_port[p_hwfn->port_id]--;
609 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
612 return ECORE_SUCCESS;
616 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
618 mb_params.param = param;
619 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
620 if (rc != ECORE_SUCCESS)
623 *o_mcp_resp = mb_params.mcp_resp;
624 *o_mcp_param = mb_params.mcp_param;
626 return ECORE_SUCCESS;
629 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
630 struct ecore_ptt *p_ptt,
635 u32 i_txn_size, u32 *i_buf)
637 struct ecore_mcp_mb_params mb_params;
638 enum _ecore_status_t rc;
640 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
642 mb_params.param = param;
643 mb_params.p_data_src = i_buf;
644 mb_params.data_src_size = (u8)i_txn_size;
645 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
646 if (rc != ECORE_SUCCESS)
649 *o_mcp_resp = mb_params.mcp_resp;
650 *o_mcp_param = mb_params.mcp_param;
652 return ECORE_SUCCESS;
655 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
656 struct ecore_ptt *p_ptt,
661 u32 *o_txn_size, u32 *o_buf)
663 struct ecore_mcp_mb_params mb_params;
664 u8 raw_data[MCP_DRV_NVM_BUF_LEN];
665 enum _ecore_status_t rc;
667 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
669 mb_params.param = param;
670 mb_params.p_data_dst = raw_data;
672 /* Use the maximal value since the actual one is part of the response */
673 mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
675 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
676 if (rc != ECORE_SUCCESS)
679 *o_mcp_resp = mb_params.mcp_resp;
680 *o_mcp_param = mb_params.mcp_param;
682 *o_txn_size = *o_mcp_param;
684 OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
686 return ECORE_SUCCESS;
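/* Note that for NVM reads the actual transaction size is returned in
 * mcp_param, which is why the destination buffer above is sized for the
 * maximal MCP_DRV_NVM_BUF_LEN and only RTE_MIN(*o_txn_size, ...) bytes are
 * copied back to the caller.
 */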
690 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
693 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
696 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
697 else if (!loaded_port[p_hwfn->port_id])
698 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
700 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
702 /* On CMT, always tell that it's engine */
703 if (p_hwfn->p_dev->num_hwfns > 1)
704 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
706 *p_load_code = load_phase;
708 loaded_port[p_hwfn->port_id]++;
710 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
711 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
712 *p_load_code, loaded, p_hwfn->port_id,
713 loaded_port[p_hwfn->port_id]);
718 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
719 enum ecore_override_force_load override_force_load)
721 bool can_force_load = false;
723 switch (override_force_load) {
724 case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
725 can_force_load = true;
727 case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
728 can_force_load = false;
731 can_force_load = (drv_role == DRV_ROLE_OS &&
732 exist_drv_role == DRV_ROLE_PREBOOT) ||
733 (drv_role == DRV_ROLE_KDUMP &&
734 exist_drv_role == DRV_ROLE_OS);
738 return can_force_load;
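/* In other words, unless explicitly overridden, a force load is considered
 * safe only when an OS driver replaces a preboot driver, or when a kdump
 * driver replaces an OS driver.
 */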
741 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
742 struct ecore_ptt *p_ptt)
744 u32 resp = 0, param = 0;
745 enum _ecore_status_t rc;
747 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
749 if (rc != ECORE_SUCCESS)
750 DP_NOTICE(p_hwfn, false,
751 "Failed to send cancel load request, rc = %d\n", rc);
756 #define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
757 #define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
758 #define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
759 #define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
760 #define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
761 #define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
762 #define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
764 static u32 ecore_get_config_bitmap(void)
766 u32 config_bitmap = 0x0;
768 #ifdef CONFIG_ECORE_L2
769 config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
771 #ifdef CONFIG_ECORE_SRIOV
772 config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
774 #ifdef CONFIG_ECORE_ROCE
775 config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
777 #ifdef CONFIG_ECORE_IWARP
778 config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
780 #ifdef CONFIG_ECORE_FCOE
781 config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
783 #ifdef CONFIG_ECORE_ISCSI
784 config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
786 #ifdef CONFIG_ECORE_LL2
787 config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
790 return config_bitmap;
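/* This bitmap is reported to the MFW as drv_ver_1 in the load request (see
 * ecore_mcp_load_req() below), so it reflects which protocol personalities
 * this ecore build was compiled with.
 */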
793 struct ecore_load_req_in_params {
795 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
796 #define ECORE_LOAD_REQ_HSI_VER_1 1
803 bool avoid_eng_reset;
806 struct ecore_load_req_out_params {
816 static enum _ecore_status_t
817 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
818 struct ecore_load_req_in_params *p_in_params,
819 struct ecore_load_req_out_params *p_out_params)
821 struct ecore_mcp_mb_params mb_params;
822 struct load_req_stc load_req;
823 struct load_rsp_stc load_rsp;
825 enum _ecore_status_t rc;
827 OSAL_MEM_ZERO(&load_req, sizeof(load_req));
828 load_req.drv_ver_0 = p_in_params->drv_ver_0;
829 load_req.drv_ver_1 = p_in_params->drv_ver_1;
830 load_req.fw_ver = p_in_params->fw_ver;
831 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
832 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
833 p_in_params->timeout_val);
834 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
835 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
836 p_in_params->avoid_eng_reset);
838 hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
839 DRV_ID_MCP_HSI_VER_CURRENT :
840 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
842 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
843 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
844 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
845 mb_params.p_data_src = &load_req;
846 mb_params.data_src_size = sizeof(load_req);
847 mb_params.p_data_dst = &load_rsp;
848 mb_params.data_dst_size = sizeof(load_rsp);
850 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
851 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
853 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
854 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
855 GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
856 GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
858 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
859 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
860 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
861 load_req.drv_ver_0, load_req.drv_ver_1,
862 load_req.fw_ver, load_req.misc0,
863 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
864 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
865 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
866 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
868 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
869 if (rc != ECORE_SUCCESS) {
870 DP_NOTICE(p_hwfn, false,
871 "Failed to send load request, rc = %d\n", rc);
875 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
876 "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
877 p_out_params->load_code = mb_params.mcp_resp;
879 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
880 p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
881 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
882 "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
883 load_rsp.drv_ver_0, load_rsp.drv_ver_1,
884 load_rsp.fw_ver, load_rsp.misc0,
885 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
886 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
887 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
889 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
890 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
891 p_out_params->exist_fw_ver = load_rsp.fw_ver;
892 p_out_params->exist_drv_role =
893 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
894 p_out_params->mfw_hsi_ver =
895 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
896 p_out_params->drv_exists =
897 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
898 LOAD_RSP_FLAGS0_DRV_EXISTS;
901 return ECORE_SUCCESS;
904 static void ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
905 enum ecore_drv_role drv_role,
909 case ECORE_DRV_ROLE_OS:
910 *p_mfw_drv_role = DRV_ROLE_OS;
912 case ECORE_DRV_ROLE_KDUMP:
913 *p_mfw_drv_role = DRV_ROLE_KDUMP;
918 enum ecore_load_req_force {
919 ECORE_LOAD_REQ_FORCE_NONE,
920 ECORE_LOAD_REQ_FORCE_PF,
921 ECORE_LOAD_REQ_FORCE_ALL,
924 static void ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
925 enum ecore_load_req_force force_cmd,
929 case ECORE_LOAD_REQ_FORCE_NONE:
930 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
932 case ECORE_LOAD_REQ_FORCE_PF:
933 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
935 case ECORE_LOAD_REQ_FORCE_ALL:
936 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
941 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
942 struct ecore_ptt *p_ptt,
943 struct ecore_load_req_params *p_params)
945 struct ecore_load_req_out_params out_params;
946 struct ecore_load_req_in_params in_params;
947 u8 mfw_drv_role = 0, mfw_force_cmd;
948 enum _ecore_status_t rc;
951 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
952 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
953 return ECORE_SUCCESS;
957 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
958 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
959 in_params.drv_ver_0 = ECORE_VERSION;
960 in_params.drv_ver_1 = ecore_get_config_bitmap();
961 in_params.fw_ver = STORM_FW_VERSION;
962 ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
963 in_params.drv_role = mfw_drv_role;
964 in_params.timeout_val = p_params->timeout_val;
965 ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
967 in_params.force_cmd = mfw_force_cmd;
968 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
970 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
971 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
972 if (rc != ECORE_SUCCESS)
975 /* First handle cases where another load request should/might be sent:
976 * - MFW expects the old interface [HSI version = 1]
977 * - MFW responds that a force load request is required
979 if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
981 "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
983 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
984 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
985 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
987 if (rc != ECORE_SUCCESS)
989 } else if (out_params.load_code ==
990 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
991 if (ecore_mcp_can_force_load(in_params.drv_role,
992 out_params.exist_drv_role,
993 p_params->override_force_load)) {
995 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
996 in_params.drv_role, in_params.fw_ver,
997 in_params.drv_ver_0, in_params.drv_ver_1,
998 out_params.exist_drv_role,
999 out_params.exist_fw_ver,
1000 out_params.exist_drv_ver_0,
1001 out_params.exist_drv_ver_1);
1003 ecore_get_mfw_force_cmd(p_hwfn,
1004 ECORE_LOAD_REQ_FORCE_ALL,
1007 in_params.force_cmd = mfw_force_cmd;
1008 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1009 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1011 if (rc != ECORE_SUCCESS)
1014 DP_NOTICE(p_hwfn, false,
"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1016 in_params.drv_role, in_params.fw_ver,
1017 in_params.drv_ver_0, in_params.drv_ver_1,
1018 out_params.exist_drv_role,
1019 out_params.exist_fw_ver,
1020 out_params.exist_drv_ver_0,
1021 out_params.exist_drv_ver_1);
1023 ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
1028 /* Now handle the other types of responses.
1029 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1030 * expected here after the additional revised load requests were sent.
1032 switch (out_params.load_code) {
1033 case FW_MSG_CODE_DRV_LOAD_ENGINE:
1034 case FW_MSG_CODE_DRV_LOAD_PORT:
1035 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1036 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
1037 out_params.drv_exists) {
1038 /* The role and fw/driver version match, but the PF is
1039 * already loaded and has not been unloaded gracefully.
1040 * This is unexpected since a quasi-FLR request was
1041 * previously sent as part of ecore_hw_prepare().
1043 DP_NOTICE(p_hwfn, false,
1044 "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
1049 DP_NOTICE(p_hwfn, false,
1050 "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1051 out_params.load_code);
1055 p_params->load_code = out_params.load_code;
1057 return ECORE_SUCCESS;
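/* The load_code handed back to the caller determines how much initialization
 * it is responsible for: LOAD_ENGINE for the first driver on the engine,
 * LOAD_PORT for the first driver on its port, and LOAD_FUNCTION when only
 * per-PF initialization is needed (compare the emulation logic in
 * ecore_mcp_mf_workaround() above).
 */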
1060 enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
1061 struct ecore_ptt *p_ptt)
1063 u32 resp = 0, param = 0;
1064 enum _ecore_status_t rc;
1066 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1068 if (rc != ECORE_SUCCESS) {
1069 DP_NOTICE(p_hwfn, false,
1070 "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1074 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
1076 /* Check if there is a DID mismatch between nvm-cfg/efuse */
1077 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1078 DP_NOTICE(p_hwfn, false,
1079 "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1081 return ECORE_SUCCESS;
1084 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
1085 struct ecore_ptt *p_ptt)
1087 u32 wol_param, mcp_resp, mcp_param;
1090 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1092 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1093 &mcp_resp, &mcp_param);
1096 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1097 struct ecore_ptt *p_ptt)
1099 struct ecore_mcp_mb_params mb_params;
1100 struct mcp_mac wol_mac;
1102 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1103 mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1105 return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1108 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
1109 struct ecore_ptt *p_ptt)
1111 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1113 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1114 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1115 ECORE_PATH_ID(p_hwfn));
1116 u32 disabled_vfs[VF_MAX_STATIC / 32];
1119 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1122 mfw_path_offsize, path_addr);
1124 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1125 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
1127 OFFSETOF(struct public_path,
1130 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1131 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1132 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1135 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1136 OSAL_VF_FLR_UPDATE(p_hwfn);
1139 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
1140 struct ecore_ptt *p_ptt,
1143 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1145 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1146 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1148 struct ecore_mcp_mb_params mb_params;
1149 enum _ecore_status_t rc;
1152 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1153 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1154 "Acking VFs [%08x,...,%08x] - %08x\n",
1155 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1157 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1158 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1159 mb_params.p_data_src = vfs_to_ack;
1160 mb_params.data_src_size = VF_MAX_STATIC / 8;
1161 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
1163 if (rc != ECORE_SUCCESS) {
1164 DP_NOTICE(p_hwfn, false,
1165 "Failed to pass ACK for VF flr to MFW\n");
1166 return ECORE_TIMEOUT;
1169 /* TMP - clear the ACK bits; should be done by MFW */
1170 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1171 ecore_wr(p_hwfn, p_ptt,
1173 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
1174 i * sizeof(u32), 0);
1179 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1180 struct ecore_ptt *p_ptt)
1182 u32 transceiver_state;
1184 transceiver_state = ecore_rd(p_hwfn, p_ptt,
1185 p_hwfn->mcp_info->port_addr +
1186 OFFSETOF(struct public_port,
1189 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1190 "Received transceiver state update [0x%08x] from mfw"
1192 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1193 OFFSETOF(struct public_port,
1194 transceiver_data)));
1196 transceiver_state = GET_MFW_FIELD(transceiver_state,
1197 ETH_TRANSCEIVER_STATE);
1199 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1200 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1202 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1205 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1206 struct ecore_ptt *p_ptt,
1207 struct ecore_mcp_link_state *p_link)
1209 u32 eee_status, val;
1211 p_link->eee_adv_caps = 0;
1212 p_link->eee_lp_adv_caps = 0;
1213 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1214 OFFSETOF(struct public_port, eee_status));
1215 p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1216 val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1217 if (val & EEE_1G_ADV)
1218 p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1219 if (val & EEE_10G_ADV)
1220 p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1221 val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1222 if (val & EEE_1G_ADV)
1223 p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1224 if (val & EEE_10G_ADV)
1225 p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1228 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
1229 struct ecore_ptt *p_ptt,
1232 struct ecore_mcp_link_state *p_link;
1236 /* Prevent SW/attentions from doing this at the same time */
1237 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
1239 p_link = &p_hwfn->mcp_info->link_output;
1240 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1242 status = ecore_rd(p_hwfn, p_ptt,
1243 p_hwfn->mcp_info->port_addr +
1244 OFFSETOF(struct public_port, link_status));
1245 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
1246 "Received link update [0x%08x] from mfw"
1248 status, (u32)(p_hwfn->mcp_info->port_addr +
1249 OFFSETOF(struct public_port,
1252 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1253 "Resetting link indications\n");
1257 if (p_hwfn->b_drv_link_init)
1258 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1260 p_link->link_up = false;
1262 p_link->full_duplex = true;
1263 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1264 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1265 p_link->speed = 100000;
1267 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1268 p_link->speed = 50000;
1270 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1271 p_link->speed = 40000;
1273 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1274 p_link->speed = 25000;
1276 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1277 p_link->speed = 20000;
1279 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1280 p_link->speed = 10000;
1282 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1283 p_link->full_duplex = false;
1285 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1286 p_link->speed = 1000;
/* The total line speed is never stored in p_link->speed, since
 * p_link->speed is later adjusted according to bandwidth allocation.
 */
1295 if (p_link->link_up && p_link->speed)
1296 p_link->line_speed = p_link->speed;
1298 p_link->line_speed = 0;
1300 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1301 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1303 /* Max bandwidth configuration */
1304 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
/* Min bandwidth configuration */
1308 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
1310 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
1311 p_link->min_pf_rate);
1313 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1314 p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1315 p_link->parallel_detection = !!(status &
1316 LINK_STATUS_PARALLEL_DETECTION_USED);
1317 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1319 p_link->partner_adv_speed |=
1320 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1321 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1322 p_link->partner_adv_speed |=
1323 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1324 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1325 p_link->partner_adv_speed |=
1326 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1327 ECORE_LINK_PARTNER_SPEED_10G : 0;
1328 p_link->partner_adv_speed |=
1329 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1330 ECORE_LINK_PARTNER_SPEED_20G : 0;
1331 p_link->partner_adv_speed |=
1332 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1333 ECORE_LINK_PARTNER_SPEED_25G : 0;
1334 p_link->partner_adv_speed |=
1335 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1336 ECORE_LINK_PARTNER_SPEED_40G : 0;
1337 p_link->partner_adv_speed |=
1338 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1339 ECORE_LINK_PARTNER_SPEED_50G : 0;
1340 p_link->partner_adv_speed |=
1341 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1342 ECORE_LINK_PARTNER_SPEED_100G : 0;
1344 p_link->partner_tx_flow_ctrl_en =
1345 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1346 p_link->partner_rx_flow_ctrl_en =
1347 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1349 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1350 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1351 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1353 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1354 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1356 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1357 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1360 p_link->partner_adv_pause = 0;
1363 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1365 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1366 ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1368 OSAL_LINK_UPDATE(p_hwfn, p_ptt);
1370 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
1373 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1374 struct ecore_ptt *p_ptt, bool b_up)
1376 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1377 struct ecore_mcp_mb_params mb_params;
1378 struct eth_phy_cfg phy_cfg;
1379 enum _ecore_status_t rc = ECORE_SUCCESS;
1383 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1384 return ECORE_SUCCESS;
1387 /* Set the shmem configuration according to params */
1388 OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1389 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1390 if (!params->speed.autoneg)
1391 phy_cfg.speed = params->speed.forced_speed;
1392 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1393 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1394 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1395 phy_cfg.adv_speed = params->speed.advertised_speeds;
1396 phy_cfg.loopback_mode = params->loopback_mode;
1398 /* There are MFWs that share this capability regardless of whether
1399 * this is feasible or not. And given that at the very least adv_caps
1400 * would be set internally by ecore, we want to make sure LFA would
1403 if ((p_hwfn->mcp_info->capabilities &
1404 FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
1405 params->eee.enable) {
1406 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1407 if (params->eee.tx_lpi_enable)
1408 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1409 if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
1410 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1411 if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
1412 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1413 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1414 EEE_TX_TIMER_USEC_OFFSET) &
1415 EEE_TX_TIMER_USEC_MASK;
1418 p_hwfn->b_drv_link_init = b_up;
1421 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1422 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
1423 phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1424 phy_cfg.loopback_mode);
1426 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1428 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1429 mb_params.cmd = cmd;
1430 mb_params.p_data_src = &phy_cfg;
1431 mb_params.data_src_size = sizeof(phy_cfg);
1432 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1434 /* if mcp fails to respond we must abort */
1435 if (rc != ECORE_SUCCESS) {
1436 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1440 /* Mimic link-change attention, done for several reasons:
* - On reset, there's no guarantee the MFW would trigger
* an attention.
1443 * - On initialization, older MFWs might not indicate link change
1444 * during LFA, so we'll never get an UP indication.
1446 ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1451 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1452 struct ecore_ptt *p_ptt)
1454 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1456 /* TODO - Add support for VFs */
1457 if (IS_VF(p_hwfn->p_dev))
1460 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1462 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1463 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1465 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1467 OFFSETOF(struct public_path, process_kill)) &
1468 PROCESS_KILL_COUNTER_MASK;
1470 return proc_kill_cnt;
1473 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1474 struct ecore_ptt *p_ptt)
1476 struct ecore_dev *p_dev = p_hwfn->p_dev;
1479 /* Prevent possible attentions/interrupts during the recovery handling
1480 * and till its load phase, during which they will be re-enabled.
1482 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1484 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1486 /* The following operations should be done once, and thus in CMT mode
1487 * are carried out by only the first HW function.
1489 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1492 if (p_dev->recov_in_prog) {
1493 DP_NOTICE(p_hwfn, false,
"Ignoring the indication since a recovery process is already in progress\n");
1499 p_dev->recov_in_prog = true;
1501 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1502 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1504 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1507 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1508 struct ecore_ptt *p_ptt,
1509 enum MFW_DRV_MSG_TYPE type)
1511 enum ecore_mcp_protocol_type stats_type;
1512 union ecore_mcp_protocol_stats stats;
1513 struct ecore_mcp_mb_params mb_params;
1515 enum _ecore_status_t rc;
1518 case MFW_DRV_MSG_GET_LAN_STATS:
1519 stats_type = ECORE_MCP_LAN_STATS;
1520 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1523 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
1527 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1529 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1530 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1531 mb_params.param = hsi_param;
1532 mb_params.p_data_src = &stats;
1533 mb_params.data_src_size = sizeof(stats);
1534 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1535 if (rc != ECORE_SUCCESS)
1536 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1539 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1540 struct public_func *p_shmem_info)
1542 struct ecore_mcp_function_info *p_info;
1544 p_info = &p_hwfn->mcp_info->func_info;
/* TODO - bandwidth min/max should have valid values of 1-100,
 * as well as some indication that the feature is disabled.
 * Until the MFW/qlediag enforce those limitations, assume there is
 * always a limit, and correct the value to min `1' and max `100' if it
 * is out of range.
 */
1552 p_info->bandwidth_min = (p_shmem_info->config &
1553 FUNC_MF_CFG_MIN_BW_MASK) >>
1554 FUNC_MF_CFG_MIN_BW_OFFSET;
1555 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1557 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1558 p_info->bandwidth_min);
1559 p_info->bandwidth_min = 1;
1562 p_info->bandwidth_max = (p_shmem_info->config &
1563 FUNC_MF_CFG_MAX_BW_MASK) >>
1564 FUNC_MF_CFG_MAX_BW_OFFSET;
1565 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1567 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1568 p_info->bandwidth_max);
1569 p_info->bandwidth_max = 100;
1573 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1574 struct ecore_ptt *p_ptt,
1575 struct public_func *p_data,
1578 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1580 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1581 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1584 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1586 size = OSAL_MIN_T(u32, sizeof(*p_data),
1587 SECTION_SIZE(mfw_path_offsize));
1588 for (i = 0; i < size / sizeof(u32); i++)
1589 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1590 func_addr + (i << 2));
1596 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1598 struct ecore_mcp_function_info *p_info;
1599 struct public_func shmem_info;
1600 u32 resp = 0, param = 0;
1602 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1604 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1606 p_info = &p_hwfn->mcp_info->func_info;
1608 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1610 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1612 /* Acknowledge the MFW */
1613 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1617 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1618 struct ecore_ptt *p_ptt)
1620 /* A single notification should be sent to upper driver in CMT mode */
1621 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1624 DP_NOTICE(p_hwfn, false,
"Fan failure was detected on the network interface card and it's going to be shut down.\n");
1628 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1631 struct ecore_mdump_cmd_params {
1640 static enum _ecore_status_t
1641 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1642 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1644 struct ecore_mcp_mb_params mb_params;
1645 enum _ecore_status_t rc;
1647 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1648 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1649 mb_params.param = p_mdump_cmd_params->cmd;
1650 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1651 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1652 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1653 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1654 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1655 if (rc != ECORE_SUCCESS)
1658 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1660 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1662 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1663 p_mdump_cmd_params->cmd);
1665 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1667 "The mdump command is not supported by the MFW\n");
1674 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1675 struct ecore_ptt *p_ptt)
1677 struct ecore_mdump_cmd_params mdump_cmd_params;
1679 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1680 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1682 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
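/* The remaining mdump helpers follow the same pattern as the ACK above: fill
 * an ecore_mdump_cmd_params with the relevant DRV_MSG_CODE_MDUMP_* sub-command
 * (plus an optional source/destination buffer) and hand it to
 * ecore_mcp_mdump_cmd().
 */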
1685 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1686 struct ecore_ptt *p_ptt,
1689 struct ecore_mdump_cmd_params mdump_cmd_params;
1691 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1692 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1693 mdump_cmd_params.p_data_src = &epoch;
1694 mdump_cmd_params.data_src_size = sizeof(epoch);
1696 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1699 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1700 struct ecore_ptt *p_ptt)
1702 struct ecore_mdump_cmd_params mdump_cmd_params;
1704 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1705 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1707 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1710 static enum _ecore_status_t
1711 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1712 struct mdump_config_stc *p_mdump_config)
1714 struct ecore_mdump_cmd_params mdump_cmd_params;
1715 enum _ecore_status_t rc;
1717 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1718 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1719 mdump_cmd_params.p_data_dst = p_mdump_config;
1720 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1722 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1723 if (rc != ECORE_SUCCESS)
1726 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1728 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1729 mdump_cmd_params.mcp_resp);
1730 rc = ECORE_UNKNOWN_ERROR;
1736 enum _ecore_status_t
1737 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1738 struct ecore_mdump_info *p_mdump_info)
1740 u32 addr, global_offsize, global_addr;
1741 struct mdump_config_stc mdump_config;
1742 enum _ecore_status_t rc;
1744 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1746 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1748 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1749 global_addr = SECTION_ADDR(global_offsize, 0);
1750 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1752 OFFSETOF(struct public_global,
1755 if (p_mdump_info->reason) {
1756 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1757 if (rc != ECORE_SUCCESS)
1760 p_mdump_info->version = mdump_config.version;
1761 p_mdump_info->config = mdump_config.config;
1762 p_mdump_info->epoch = mdump_config.epoc;
1763 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1764 p_mdump_info->valid_logs = mdump_config.valid_logs;
1766 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1767 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1768 p_mdump_info->reason, p_mdump_info->version,
1769 p_mdump_info->config, p_mdump_info->epoch,
1770 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1772 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1773 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1776 return ECORE_SUCCESS;
1779 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1780 struct ecore_ptt *p_ptt)
1782 struct ecore_mdump_cmd_params mdump_cmd_params;
1784 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1785 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1787 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1790 enum _ecore_status_t
1791 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1792 struct ecore_mdump_retain_data *p_mdump_retain)
1794 struct ecore_mdump_cmd_params mdump_cmd_params;
1795 struct mdump_retain_data_stc mfw_mdump_retain;
1796 enum _ecore_status_t rc;
1798 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1799 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1800 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1801 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1803 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1804 if (rc != ECORE_SUCCESS)
1807 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1809 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1810 mdump_cmd_params.mcp_resp);
1811 return ECORE_UNKNOWN_ERROR;
1814 p_mdump_retain->valid = mfw_mdump_retain.valid;
1815 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1816 p_mdump_retain->pf = mfw_mdump_retain.pf;
1817 p_mdump_retain->status = mfw_mdump_retain.status;
1819 return ECORE_SUCCESS;
1822 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1823 struct ecore_ptt *p_ptt)
1825 struct ecore_mdump_cmd_params mdump_cmd_params;
1827 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1828 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1830 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1833 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1834 struct ecore_ptt *p_ptt)
1836 struct ecore_mdump_retain_data mdump_retain;
1837 enum _ecore_status_t rc;
1839 /* In CMT mode - no need for more than a single acknowledgment to the
1840 * MFW, and no more than a single notification to the upper driver.
1842 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1845 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1846 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1847 DP_NOTICE(p_hwfn, false,
1848 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1849 mdump_retain.epoch, mdump_retain.pf,
1850 mdump_retain.status);
1852 DP_NOTICE(p_hwfn, false,
1853 "The MFW notified that a critical error occurred in the device\n");
1856 if (p_hwfn->p_dev->allow_mdump) {
1857 DP_NOTICE(p_hwfn, false,
1858 "Not acknowledging the notification to allow the MFW crash dump\n");
1862 DP_NOTICE(p_hwfn, false,
1863 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1864 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1865 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1868 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1869 struct ecore_ptt *p_ptt)
1871 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1872 enum _ecore_status_t rc = ECORE_SUCCESS;
1876 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1878 /* Read Messages from MFW */
1879 ecore_mcp_read_mb(p_hwfn, p_ptt);
1881 /* Compare current messages to old ones */
1882 for (i = 0; i < info->mfw_mb_length; i++) {
1883 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1888 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1889 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1890 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1893 case MFW_DRV_MSG_LINK_CHANGE:
1894 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1896 case MFW_DRV_MSG_VF_DISABLED:
1897 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1899 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1900 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1901 ECORE_DCBX_REMOTE_LLDP_MIB);
1903 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1904 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1905 ECORE_DCBX_REMOTE_MIB);
1907 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1908 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1909 ECORE_DCBX_OPERATIONAL_MIB);
1911 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1912 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1914 case MFW_DRV_MSG_ERROR_RECOVERY:
1915 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1917 case MFW_DRV_MSG_GET_LAN_STATS:
1918 case MFW_DRV_MSG_GET_FCOE_STATS:
1919 case MFW_DRV_MSG_GET_ISCSI_STATS:
1920 case MFW_DRV_MSG_GET_RDMA_STATS:
1921 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1923 case MFW_DRV_MSG_BW_UPDATE:
1924 ecore_mcp_update_bw(p_hwfn, p_ptt);
1926 case MFW_DRV_MSG_FAILURE_DETECTED:
1927 ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1929 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1930 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1933 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1938 /* ACK everything */
1939 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1940 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
/* The MFW expects the answer in BE, so force the write in that format */
1943 ecore_wr(p_hwfn, p_ptt,
1944 info->mfw_mb_addr + sizeof(u32) +
1945 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1946 sizeof(u32) + i * sizeof(u32), val);
1950 DP_NOTICE(p_hwfn, false,
"Received an MFW message indication but no new message!\n");
1956 /* Copy the new mfw messages into the shadow */
1957 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
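/* The shadow buffer is what makes the handling above edge-triggered: a
 * message is only dispatched when its dword differs from the value recorded
 * on the previous invocation, and the BE write-back above acknowledges the
 * current values so the MFW may reuse the slots.
 */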
1962 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1963 struct ecore_ptt *p_ptt,
1965 u32 *p_running_bundle_id)
1970 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1971 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1972 return ECORE_SUCCESS;
1976 if (IS_VF(p_hwfn->p_dev)) {
1977 if (p_hwfn->vf_iov_info) {
1978 struct pfvf_acquire_resp_tlv *p_resp;
1980 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1981 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1982 return ECORE_SUCCESS;
1984 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1985 "VF requested MFW version prior to ACQUIRE\n");
1990 global_offsize = ecore_rd(p_hwfn, p_ptt,
1991 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1995 ecore_rd(p_hwfn, p_ptt,
1996 SECTION_ADDR(global_offsize,
1997 0) + OFFSETOF(struct public_global, mfw_ver));
1999 if (p_running_bundle_id != OSAL_NULL) {
2000 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2001 SECTION_ADDR(global_offsize,
2003 OFFSETOF(struct public_global,
2004 running_bundle_id));
2007 return ECORE_SUCCESS;
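/* Usage sketch (illustrative, not part of the driver): the 32-bit MFW
 * version read above is commonly unpacked into four byte-sized fields:
 *
 *	u32 mfw_ver = 0;
 *
 *	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver, OSAL_NULL);
 *	DP_INFO(p_hwfn, "MFW %d.%d.%d.%d\n",
 *		(mfw_ver >> 24) & 0xff, (mfw_ver >> 16) & 0xff,
 *		(mfw_ver >> 8) & 0xff, mfw_ver & 0xff);
 *
 * The exact packing of the version word is defined by the MFW; the byte
 * breakdown above is the conventional interpretation, not a guarantee.
 */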
2010 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2011 struct ecore_ptt *p_ptt,
2015 /* TODO - Add support for VFs */
2016 if (IS_VF(p_hwfn->p_dev))
2019 if (!ecore_mcp_is_init(p_hwfn)) {
2020 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
2025 *p_media_type = MEDIA_UNSPECIFIED;
2028 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2029 p_hwfn->mcp_info->port_addr +
2030 OFFSETOF(struct public_port,
2034 return ECORE_SUCCESS;
2038 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2040 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2041 enum ecore_pci_personality *p_proto)
2043 *p_proto = ECORE_PCI_ETH;
2045 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2046 "According to Legacy capabilities, L2 personality is %08x\n",
2051 static enum _ecore_status_t
2052 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2053 struct ecore_ptt *p_ptt,
2054 enum ecore_pci_personality *p_proto)
2056 u32 resp = 0, param = 0;
2057 enum _ecore_status_t rc;
2059 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2060 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2061 (u32)*p_proto, resp, param);
2062 return ECORE_SUCCESS;
2065 static enum _ecore_status_t
2066 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2067 struct public_func *p_info,
2068 struct ecore_ptt *p_ptt,
2069 enum ecore_pci_personality *p_proto)
2071 enum _ecore_status_t rc = ECORE_SUCCESS;
2073 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2074 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2075 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2077 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2086 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2087 struct ecore_ptt *p_ptt)
2089 struct ecore_mcp_function_info *info;
2090 struct public_func shmem_info;
2092 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2093 info = &p_hwfn->mcp_info->func_info;
2095 info->pause_on_host = (shmem_info.config &
2096 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2098 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2100 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2101 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2105 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2107 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2108 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2109 info->mac[1] = (u8)(shmem_info.mac_upper);
2110 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2111 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2112 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2113 info->mac[5] = (u8)(shmem_info.mac_lower);
2115 /* TODO - are there protocols for which there's no MAC? */
2116 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2119 /* TODO - are these calculations correct for a BE machine? */
2120 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2121 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2122 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2123 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2125 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2127 info->mtu = (u16)shmem_info.mtu_size;
2132 info->mtu = (u16)shmem_info.mtu_size;
2134 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2135 "Read configuration from shmem: pause_on_host %02x"
2136 " protocol %02x BW [%02x - %02x]"
2137 " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
2138 " node %lx ovlan %04x\n",
2139 info->pause_on_host, info->protocol,
2140 info->bandwidth_min, info->bandwidth_max,
2141 info->mac[0], info->mac[1], info->mac[2],
2142 info->mac[3], info->mac[4], info->mac[5],
2143 (unsigned long)info->wwn_port,
2144 (unsigned long)info->wwn_node, info->ovlan);
2146 return ECORE_SUCCESS;
2149 struct ecore_mcp_link_params
2150 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2152 if (!p_hwfn || !p_hwfn->mcp_info)
2154 return &p_hwfn->mcp_info->link_input;
2157 struct ecore_mcp_link_state
2158 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2160 if (!p_hwfn || !p_hwfn->mcp_info)
2164 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2165 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2166 p_hwfn->mcp_info->link_output.link_up = true;
2170 return &p_hwfn->mcp_info->link_output;
2173 struct ecore_mcp_link_capabilities
2174 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2176 if (!p_hwfn || !p_hwfn->mcp_info)
2178 return &p_hwfn->mcp_info->link_capabilities;
2181 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2182 struct ecore_ptt *p_ptt)
2184 u32 resp = 0, param = 0;
2185 enum _ecore_status_t rc;
2187 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2188 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2190 /* Wait for the drain to complete before returning */
2196 const struct ecore_mcp_function_info
2197 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2199 if (!p_hwfn || !p_hwfn->mcp_info)
2201 return &p_hwfn->mcp_info->func_info;
2204 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2205 struct ecore_ptt *p_ptt, u32 personalities)
2207 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2208 struct public_func shmem_info;
2209 int i, count = 0, num_pfs;
2211 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2213 for (i = 0; i < num_pfs; i++) {
2214 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2215 MCP_PF_ID_BY_REL(p_hwfn, i));
2216 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2219 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2224 if ((1 << ((u32)protocol)) & personalities)
2231 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2232 struct ecore_ptt *p_ptt,
2238 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2239 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2244 if (IS_VF(p_hwfn->p_dev))
2247 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2248 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2249 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2250 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2252 *p_flash_size = flash_size;
2254 return ECORE_SUCCESS;
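/* Worked example of the computation above: the NVM_CFG4 field holds log2 of
 * the flash size in Mbits, and 1 Mbit is 2^MCP_BYTES_PER_MBIT_OFFSET (2^17)
 * bytes, so a raw field value of 6 yields 1 << (6 + 17) = 8 MiB (64 Mbit).
 */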
2257 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2258 struct ecore_ptt *p_ptt)
2260 struct ecore_dev *p_dev = p_hwfn->p_dev;
2262 if (p_dev->recov_in_prog) {
2263 DP_NOTICE(p_hwfn, false,
2264 "Avoid triggering a recovery since such a process"
2265 " is already in progress\n");
2269 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2270 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2272 return ECORE_SUCCESS;
2275 static enum _ecore_status_t
2276 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2277 struct ecore_ptt *p_ptt,
2280 u32 resp = 0, param = 0, rc_param = 0;
2281 enum _ecore_status_t rc;
2283 /* Only the leader hwfn can configure MSI-X, and CMT must be taken into account */
2285 if (!IS_LEAD_HWFN(p_hwfn))
2286 return ECORE_SUCCESS;
2287 num *= p_hwfn->p_dev->num_hwfns;
2289 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2290 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2291 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2292 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2294 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2297 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2298 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2302 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2303 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2310 static enum _ecore_status_t
2311 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2312 struct ecore_ptt *p_ptt,
2315 u32 resp = 0, param = num, rc_param = 0;
2316 enum _ecore_status_t rc;
2318 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2319 param, &resp, &rc_param);
2321 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2322 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2325 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2326 "Requested 0x%02x MSI-x interrupts for VFs\n",
2333 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2334 struct ecore_ptt *p_ptt,
2337 if (ECORE_IS_BB(p_hwfn->p_dev))
2338 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2340 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2343 enum _ecore_status_t
2344 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2345 struct ecore_mcp_drv_version *p_ver)
2347 struct ecore_mcp_mb_params mb_params;
2348 struct drv_version_stc drv_version;
2352 enum _ecore_status_t rc;
2355 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2356 return ECORE_SUCCESS;
2359 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2360 drv_version.version = p_ver->version;
2361 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2362 for (i = 0; i < num_words; i++) {
2363 /* The driver name is expected to be in a big-endian format */
2364 p_name = &p_ver->name[i * sizeof(u32)];
2365 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2366 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2369 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2370 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2371 mb_params.p_data_src = &drv_version;
2372 mb_params.data_src_size = sizeof(drv_version);
2373 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2374 if (rc != ECORE_SUCCESS)
2375 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
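/* Usage sketch (illustrative, not part of the driver): a caller fills a
 * struct ecore_mcp_drv_version and passes it in. "major"/"minor"/"rev"/"eng"
 * below are hypothetical locals, and the version packing shown is only the
 * common convention:
 *
 *	struct ecore_mcp_drv_version drv_ver;
 *
 *	OSAL_MEM_ZERO(&drv_ver, sizeof(drv_ver));
 *	drv_ver.version = (major << 24) | (minor << 16) | (rev << 8) | eng;
 *	OSAL_MEMCPY(drv_ver.name, "rte", sizeof("rte"));
 *	(void)ecore_mcp_send_drv_version(p_hwfn, p_ptt, &drv_ver);
 */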
2380 /* Wait at most 100 msec (10 retries x 10 msec each) for the MCP to halt */
2381 #define ECORE_MCP_HALT_SLEEP_MS 10
2382 #define ECORE_MCP_HALT_MAX_RETRIES 10
2384 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2385 struct ecore_ptt *p_ptt)
2387 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2388 enum _ecore_status_t rc;
2390 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2392 if (rc != ECORE_SUCCESS) {
2393 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2398 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2399 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2400 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2402 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2404 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2405 DP_NOTICE(p_hwfn, false,
2406 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2407 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2411 ecore_mcp_cmd_set_blocking(p_hwfn, true);
2413 return ECORE_SUCCESS;
2416 #define ECORE_MCP_RESUME_SLEEP_MS 10
2418 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2419 struct ecore_ptt *p_ptt)
2421 u32 cpu_mode, cpu_state;
2423 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2425 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2426 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2427 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2429 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2430 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2432 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2433 DP_NOTICE(p_hwfn, false,
2434 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2435 cpu_mode, cpu_state);
2439 ecore_mcp_cmd_set_blocking(p_hwfn, false);
2441 return ECORE_SUCCESS;
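/* Usage sketch (illustrative, not part of the driver): halt and resume are
 * intended to be used as a pair around flows that need the management CPU
 * stopped. Note that ecore_mcp_halt() also switches the mailbox into
 * blocking mode and ecore_mcp_resume() switches it back:
 *
 *	if (ecore_mcp_halt(p_hwfn, p_ptt) == ECORE_SUCCESS) {
 *		do_mcp_sensitive_work(p_hwfn, p_ptt);
 *		(void)ecore_mcp_resume(p_hwfn, p_ptt);
 *	}
 *
 * do_mcp_sensitive_work() is a placeholder for the caller's own flow.
 */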
2444 enum _ecore_status_t
2445 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2446 struct ecore_ptt *p_ptt,
2447 enum ecore_ov_client client)
2449 enum _ecore_status_t rc;
2450 u32 resp = 0, param = 0;
2454 case ECORE_OV_CLIENT_DRV:
2455 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2457 case ECORE_OV_CLIENT_USER:
2458 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2460 case ECORE_OV_CLIENT_VENDOR_SPEC:
2461 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2464 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2468 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2469 drv_mb_param, &resp, &param);
2470 if (rc != ECORE_SUCCESS)
2471 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2476 enum _ecore_status_t
2477 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2478 struct ecore_ptt *p_ptt,
2479 enum ecore_ov_driver_state drv_state)
2481 enum _ecore_status_t rc;
2482 u32 resp = 0, param = 0;
2485 switch (drv_state) {
2486 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2487 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2489 case ECORE_OV_DRIVER_STATE_DISABLED:
2490 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2492 case ECORE_OV_DRIVER_STATE_ACTIVE:
2493 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2496 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2500 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2501 drv_mb_param, &resp, &param);
2502 if (rc != ECORE_SUCCESS)
2503 DP_ERR(p_hwfn, "Failed to send driver state\n");
2508 enum _ecore_status_t
2509 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2510 struct ecore_fc_npiv_tbl *p_table)
2515 enum _ecore_status_t
2516 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
2517 struct ecore_ptt *p_ptt, u16 mtu)
2522 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2523 struct ecore_ptt *p_ptt,
2524 enum ecore_led_mode mode)
2526 u32 resp = 0, param = 0, drv_mb_param;
2527 enum _ecore_status_t rc;
2530 case ECORE_LED_MODE_ON:
2531 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2533 case ECORE_LED_MODE_OFF:
2534 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2536 case ECORE_LED_MODE_RESTORE:
2537 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2540 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2544 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2545 drv_mb_param, &resp, &param);
2546 if (rc != ECORE_SUCCESS)
2547 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2552 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2553 struct ecore_ptt *p_ptt,
2556 u32 resp = 0, param = 0;
2557 enum _ecore_status_t rc;
2559 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2560 mask_parities, &resp, &param);
2562 if (rc != ECORE_SUCCESS) {
2564 "MCP response failure for mask parities, aborting\n");
2565 } else if (resp != FW_MSG_CODE_OK) {
2567 "MCP did not ack mask parity request. Old MFW?\n");
2574 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2577 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2578 u32 bytes_left, offset, bytes_to_copy, buf_size;
2579 u32 nvm_offset, resp, param;
2580 struct ecore_ptt *p_ptt;
2581 enum _ecore_status_t rc = ECORE_SUCCESS;
2583 p_ptt = ecore_ptt_acquire(p_hwfn);
2589 while (bytes_left > 0) {
2590 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2591 MCP_DRV_NVM_BUF_LEN);
2592 nvm_offset = (addr + offset) | (bytes_to_copy <<
2593 DRV_MB_PARAM_NVM_LEN_OFFSET);
2594 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2595 DRV_MSG_CODE_NVM_READ_NVRAM,
2596 nvm_offset, &resp, &param, &buf_size,
2597 (u32 *)(p_buf + offset));
2598 if (rc != ECORE_SUCCESS) {
2599 DP_NOTICE(p_dev, false,
2600 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
2602 resp = FW_MSG_CODE_ERROR;
2606 if (resp != FW_MSG_CODE_NVM_OK) {
2607 DP_NOTICE(p_dev, false,
2608 "nvm read failed, resp = 0x%08x\n", resp);
2609 rc = ECORE_UNKNOWN_ERROR;
2613 /* This can be a lengthy process, and the scheduler might not be
2614 * preemptible. Sleep a bit to prevent CPU hogging.
2616 if (bytes_left % 0x1000 <
2617 (bytes_left - buf_size) % 0x1000)
2621 bytes_left -= buf_size;
2624 p_dev->mcp_nvm_resp = resp;
2625 ecore_ptt_release(p_hwfn, p_ptt);
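/* Usage sketch (illustrative, not part of the driver): the loop above splits
 * the request into MCP_DRV_NVM_BUF_LEN sized chunks, so a caller may ask for
 * an arbitrary length in one call. "nvm_addr" is a hypothetical NVM offset:
 *
 *	u8 buf[256];
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_nvm_read(p_dev, nvm_addr, buf, sizeof(buf));
 *
 * On success, buf[] holds 256 bytes read starting at nvm_addr.
 */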
2630 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2631 u32 addr, u8 *p_buf, u32 len)
2633 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2634 struct ecore_ptt *p_ptt;
2636 enum _ecore_status_t rc;
2638 p_ptt = ecore_ptt_acquire(p_hwfn);
2642 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2643 (cmd == ECORE_PHY_CORE_READ) ?
2644 DRV_MSG_CODE_PHY_CORE_READ :
2645 DRV_MSG_CODE_PHY_RAW_READ,
2646 addr, &resp, &param, &len, (u32 *)p_buf);
2647 if (rc != ECORE_SUCCESS)
2648 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2650 p_dev->mcp_nvm_resp = resp;
2651 ecore_ptt_release(p_hwfn, p_ptt);
2656 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2658 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2659 struct ecore_ptt *p_ptt;
2661 p_ptt = ecore_ptt_acquire(p_hwfn);
2665 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2666 ecore_ptt_release(p_hwfn, p_ptt);
2668 return ECORE_SUCCESS;
2671 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2673 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2674 struct ecore_ptt *p_ptt;
2676 enum _ecore_status_t rc;
2678 p_ptt = ecore_ptt_acquire(p_hwfn);
2681 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
2683 p_dev->mcp_nvm_resp = resp;
2684 ecore_ptt_release(p_hwfn, p_ptt);
2689 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2692 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2693 struct ecore_ptt *p_ptt;
2695 enum _ecore_status_t rc;
2697 p_ptt = ecore_ptt_acquire(p_hwfn);
2700 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
2702 p_dev->mcp_nvm_resp = resp;
2703 ecore_ptt_release(p_hwfn, p_ptt);
2708 /* rc defaults to ECORE_INVAL because the while loop below
2709 * might not be entered at all when len is 0
2711 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2712 u32 addr, u8 *p_buf, u32 len)
2714 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
2715 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2716 enum _ecore_status_t rc = ECORE_INVAL;
2717 struct ecore_ptt *p_ptt;
2719 p_ptt = ecore_ptt_acquire(p_hwfn);
2724 case ECORE_PUT_FILE_DATA:
2725 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2727 case ECORE_NVM_WRITE_NVRAM:
2728 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2730 case ECORE_EXT_PHY_FW_UPGRADE:
2731 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
2734 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
2741 while (buf_idx < len) {
2742 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2743 MCP_DRV_NVM_BUF_LEN);
2744 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
2747 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
2748 &resp, &param, buf_size,
2749 (u32 *)&p_buf[buf_idx]);
2750 if (rc != ECORE_SUCCESS) {
2751 DP_NOTICE(p_dev, false,
2752 "ecore_mcp_nvm_write() failed, rc = %d\n",
2754 resp = FW_MSG_CODE_ERROR;
2758 if (resp != FW_MSG_CODE_OK &&
2759 resp != FW_MSG_CODE_NVM_OK &&
2760 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
2761 DP_NOTICE(p_dev, false,
2762 "nvm write failed, resp = 0x%08x\n", resp);
2763 rc = ECORE_UNKNOWN_ERROR;
2767 /* This can be a lengthy process, and the scheduler might not be
2768 * preemptible. Sleep a bit to prevent CPU hogging.
2770 if (buf_idx % 0x1000 >
2771 (buf_idx + buf_size) % 0x1000)
2774 buf_idx += buf_size;
2777 p_dev->mcp_nvm_resp = resp;
2779 ecore_ptt_release(p_hwfn, p_ptt);
2784 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2785 u32 addr, u8 *p_buf, u32 len)
2787 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2788 struct ecore_ptt *p_ptt;
2789 u32 resp, param, nvm_cmd;
2790 enum _ecore_status_t rc;
2792 p_ptt = ecore_ptt_acquire(p_hwfn);
2796 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
2797 DRV_MSG_CODE_PHY_RAW_WRITE;
2798 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
2799 &resp, &param, len, (u32 *)p_buf);
2800 if (rc != ECORE_SUCCESS)
2801 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2802 p_dev->mcp_nvm_resp = resp;
2803 ecore_ptt_release(p_hwfn, p_ptt);
2808 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2811 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2812 struct ecore_ptt *p_ptt;
2814 enum _ecore_status_t rc;
2816 p_ptt = ecore_ptt_acquire(p_hwfn);
2820 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
2822 p_dev->mcp_nvm_resp = resp;
2823 ecore_ptt_release(p_hwfn, p_ptt);
2828 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2829 struct ecore_ptt *p_ptt,
2830 u32 port, u32 addr, u32 offset,
2833 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
2835 enum _ecore_status_t rc;
2837 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2838 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
2842 while (bytes_left > 0) {
2843 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2844 MAX_I2C_TRANSACTION_SIZE);
2845 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2846 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2847 nvm_offset |= ((addr + offset) <<
2848 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
2849 nvm_offset |= (bytes_to_copy <<
2850 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
2851 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2852 DRV_MSG_CODE_TRANSCEIVER_READ,
2853 nvm_offset, &resp, &param, &buf_size,
2854 (u32 *)(p_buf + offset));
2855 if ((resp & FW_MSG_CODE_MASK) ==
2856 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2858 } else if ((resp & FW_MSG_CODE_MASK) !=
2859 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2860 return ECORE_UNKNOWN_ERROR;
2863 bytes_left -= buf_size;
2866 return ECORE_SUCCESS;
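/* Usage sketch (illustrative, not part of the driver): reading the standard
 * SFP ID EEPROM (I2C address 0xA0 per the SFF-8472 convention) on this
 * hwfn's port. The trailing parameters are assumed to be the length and the
 * destination buffer:
 *
 *	u8 eeprom[128];
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, MFW_PORT(p_hwfn), 0xA0,
 *				    0, sizeof(eeprom), eeprom);
 *
 * On success, eeprom[] holds the first 128 bytes of the module's A0h page.
 */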
2869 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2870 struct ecore_ptt *p_ptt,
2871 u32 port, u32 addr, u32 offset,
2874 u32 buf_idx, buf_size, nvm_offset, resp, param;
2875 enum _ecore_status_t rc;
2877 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2878 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
2880 while (buf_idx < len) {
2881 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2882 MAX_I2C_TRANSACTION_SIZE);
2883 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2884 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2885 nvm_offset |= ((offset + buf_idx) <<
2886 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
2887 nvm_offset |= (buf_size <<
2888 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
2889 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
2890 DRV_MSG_CODE_TRANSCEIVER_WRITE,
2891 nvm_offset, &resp, &param, buf_size,
2892 (u32 *)&p_buf[buf_idx]);
2893 if ((resp & FW_MSG_CODE_MASK) ==
2894 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2896 } else if ((resp & FW_MSG_CODE_MASK) !=
2897 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2898 return ECORE_UNKNOWN_ERROR;
2900 buf_idx += buf_size;
2903 return ECORE_SUCCESS;
2906 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2907 struct ecore_ptt *p_ptt,
2908 u16 gpio, u32 *gpio_val)
2910 enum _ecore_status_t rc = ECORE_SUCCESS;
2911 u32 drv_mb_param = 0, rsp;
2913 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
2915 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
2916 drv_mb_param, &rsp, gpio_val);
2918 if (rc != ECORE_SUCCESS)
2921 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2922 return ECORE_UNKNOWN_ERROR;
2924 return ECORE_SUCCESS;
2927 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2928 struct ecore_ptt *p_ptt,
2929 u16 gpio, u16 gpio_val)
2931 enum _ecore_status_t rc = ECORE_SUCCESS;
2932 u32 drv_mb_param = 0, param, rsp;
2934 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
2935 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
2937 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2938 drv_mb_param, &rsp, &param);
2940 if (rc != ECORE_SUCCESS)
2943 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2944 return ECORE_UNKNOWN_ERROR;
2946 return ECORE_SUCCESS;
2949 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2950 struct ecore_ptt *p_ptt,
2951 u16 gpio, u32 *gpio_direction,
2954 u32 drv_mb_param = 0, rsp, val = 0;
2955 enum _ecore_status_t rc = ECORE_SUCCESS;
2957 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
2959 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2960 drv_mb_param, &rsp, &val);
2961 if (rc != ECORE_SUCCESS)
2964 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2965 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
2966 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2967 DRV_MB_PARAM_GPIO_CTRL_OFFSET;
2969 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2970 return ECORE_UNKNOWN_ERROR;
2972 return ECORE_SUCCESS;
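/* Usage sketch (illustrative, not part of the driver): a read-modify-write of
 * a GPIO through the MFW. "gpio_num" stands for whatever pin the board
 * design assigns:
 *
 *	u32 val;
 *
 *	if (ecore_mcp_gpio_read(p_hwfn, p_ptt, gpio_num, &val) ==
 *	    ECORE_SUCCESS)
 *		(void)ecore_mcp_gpio_write(p_hwfn, p_ptt, gpio_num,
 *					   (u16)(val ^ 1));
 */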
2975 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2976 struct ecore_ptt *p_ptt)
2978 u32 drv_mb_param = 0, rsp, param;
2979 enum _ecore_status_t rc = ECORE_SUCCESS;
2981 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2982 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
2984 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2985 drv_mb_param, &rsp, &param);
2987 if (rc != ECORE_SUCCESS)
2990 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2991 (param != DRV_MB_PARAM_BIST_RC_PASSED))
2992 rc = ECORE_UNKNOWN_ERROR;
2997 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2998 struct ecore_ptt *p_ptt)
3000 u32 drv_mb_param, rsp, param;
3001 enum _ecore_status_t rc = ECORE_SUCCESS;
3003 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3004 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3006 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3007 drv_mb_param, &rsp, &param);
3009 if (rc != ECORE_SUCCESS)
3012 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3013 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3014 rc = ECORE_UNKNOWN_ERROR;
3019 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3020 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3022 u32 drv_mb_param = 0, rsp;
3023 enum _ecore_status_t rc = ECORE_SUCCESS;
3025 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3026 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3028 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3029 drv_mb_param, &rsp, num_images);
3031 if (rc != ECORE_SUCCESS)
3034 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3035 rc = ECORE_UNKNOWN_ERROR;
3040 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3041 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3042 struct bist_nvm_image_att *p_image_att, u32 image_index)
3044 u32 buf_size, nvm_offset, resp, param;
3045 enum _ecore_status_t rc;
3047 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3048 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3049 nvm_offset |= (image_index <<
3050 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3051 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3052 nvm_offset, &resp, &param, &buf_size,
3053 (u32 *)p_image_att);
3054 if (rc != ECORE_SUCCESS)
3057 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3058 (p_image_att->return_code != 1))
3059 rc = ECORE_UNKNOWN_ERROR;
3064 enum _ecore_status_t
3065 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3066 struct ecore_ptt *p_ptt,
3067 struct ecore_temperature_info *p_temp_info)
3069 struct ecore_temperature_sensor *p_temp_sensor;
3070 struct temperature_status_stc mfw_temp_info;
3071 struct ecore_mcp_mb_params mb_params;
3073 enum _ecore_status_t rc;
3076 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3077 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3078 mb_params.p_data_dst = &mfw_temp_info;
3079 mb_params.data_dst_size = sizeof(mfw_temp_info);
3080 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3081 if (rc != ECORE_SUCCESS)
3084 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3085 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3086 ECORE_MAX_NUM_OF_SENSORS);
3087 for (i = 0; i < p_temp_info->num_sensors; i++) {
3088 val = mfw_temp_info.sensor[i];
3089 p_temp_sensor = &p_temp_info->sensors[i];
3090 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3091 SENSOR_LOCATION_OFFSET;
3092 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3093 THRESHOLD_HIGH_OFFSET;
3094 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3095 CRITICAL_TEMPERATURE_OFFSET;
3096 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3097 CURRENT_TEMP_OFFSET;
3100 return ECORE_SUCCESS;
3103 enum _ecore_status_t ecore_mcp_get_mba_versions(
3104 struct ecore_hwfn *p_hwfn,
3105 struct ecore_ptt *p_ptt,
3106 struct ecore_mba_vers *p_mba_vers)
3108 u32 buf_size, resp, param;
3109 enum _ecore_status_t rc;
3111 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3112 0, &resp, &param, &buf_size,
3113 &p_mba_vers->mba_vers[0]);
3115 if (rc != ECORE_SUCCESS)
3118 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3119 rc = ECORE_UNKNOWN_ERROR;
3121 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3122 rc = ECORE_UNKNOWN_ERROR;
3127 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3128 struct ecore_ptt *p_ptt,
3133 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3134 0, &rsp, (u32 *)num_events);
3137 static enum resource_id_enum
3138 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3140 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3144 mfw_res_id = RESOURCE_NUM_SB_E;
3146 case ECORE_L2_QUEUE:
3147 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3150 mfw_res_id = RESOURCE_NUM_VPORT_E;
3153 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3156 mfw_res_id = RESOURCE_NUM_PQ_E;
3159 mfw_res_id = RESOURCE_NUM_RL_E;
3163 /* Each VFC resource can accommodate both a MAC and a VLAN */
3164 mfw_res_id = RESOURCE_VFC_FILTER_E;
3167 mfw_res_id = RESOURCE_ILT_E;
3169 case ECORE_LL2_QUEUE:
3170 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3172 case ECORE_RDMA_CNQ_RAM:
3173 case ECORE_CMDQS_CQS:
3174 /* CNQ/CMDQS are the same resource */
3175 mfw_res_id = RESOURCE_CQS_E;
3177 case ECORE_RDMA_STATS_QUEUE:
3178 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3181 mfw_res_id = RESOURCE_BDQ_E;
3190 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3191 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3192 #define ECORE_RESC_ALLOC_VERSION \
3193 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3194 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
3195 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3196 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3198 struct ecore_resc_alloc_in_params {
3200 enum ecore_resources res_id;
3204 struct ecore_resc_alloc_out_params {
3214 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
3216 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3218 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3219 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3220 enum _ecore_status_t rc;
3222 /* Allow ongoing PCIe transactions to complete */
3223 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3225 /* Clear the PF's internal FID_enable in the PXP */
3226 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3227 if (rc != ECORE_SUCCESS)
3228 DP_NOTICE(p_hwfn, false,
3229 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
3235 static enum _ecore_status_t
3236 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3237 struct ecore_ptt *p_ptt,
3238 struct ecore_resc_alloc_in_params *p_in_params,
3239 struct ecore_resc_alloc_out_params *p_out_params)
3241 struct ecore_mcp_mb_params mb_params;
3242 struct resource_info mfw_resc_info;
3243 enum _ecore_status_t rc;
3245 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3247 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3248 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3250 "Failed to match resource %d [%s] with the MFW resources\n",
3251 p_in_params->res_id,
3252 ecore_hw_get_resc_name(p_in_params->res_id));
3256 switch (p_in_params->cmd) {
3257 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3258 mfw_resc_info.size = p_in_params->resc_max_val;
3260 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3263 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3268 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3269 mb_params.cmd = p_in_params->cmd;
3270 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3271 mb_params.p_data_src = &mfw_resc_info;
3272 mb_params.data_src_size = sizeof(mfw_resc_info);
3273 mb_params.p_data_dst = mb_params.p_data_src;
3274 mb_params.data_dst_size = mb_params.data_src_size;
3276 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3277 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3278 p_in_params->cmd, p_in_params->res_id,
3279 ecore_hw_get_resc_name(p_in_params->res_id),
3280 GET_MFW_FIELD(mb_params.param,
3281 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3282 GET_MFW_FIELD(mb_params.param,
3283 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3284 p_in_params->resc_max_val);
3286 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3287 if (rc != ECORE_SUCCESS)
3290 p_out_params->mcp_resp = mb_params.mcp_resp;
3291 p_out_params->mcp_param = mb_params.mcp_param;
3292 p_out_params->resc_num = mfw_resc_info.size;
3293 p_out_params->resc_start = mfw_resc_info.offset;
3294 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3295 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3296 p_out_params->flags = mfw_resc_info.flags;
3298 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3299 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3300 GET_MFW_FIELD(p_out_params->mcp_param,
3301 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3302 GET_MFW_FIELD(p_out_params->mcp_param,
3303 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3304 p_out_params->resc_num, p_out_params->resc_start,
3305 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3306 p_out_params->flags);
3308 return ECORE_SUCCESS;
3311 enum _ecore_status_t
3312 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3313 enum ecore_resources res_id, u32 resc_max_val,
3316 struct ecore_resc_alloc_out_params out_params;
3317 struct ecore_resc_alloc_in_params in_params;
3318 enum _ecore_status_t rc;
3320 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3321 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3322 in_params.res_id = res_id;
3323 in_params.resc_max_val = resc_max_val;
3324 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3325 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3327 if (rc != ECORE_SUCCESS)
3330 *p_mcp_resp = out_params.mcp_resp;
3332 return ECORE_SUCCESS;
3335 enum _ecore_status_t
3336 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3337 enum ecore_resources res_id, u32 *p_mcp_resp,
3338 u32 *p_resc_num, u32 *p_resc_start)
3340 struct ecore_resc_alloc_out_params out_params;
3341 struct ecore_resc_alloc_in_params in_params;
3342 enum _ecore_status_t rc;
3344 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3345 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3346 in_params.res_id = res_id;
3347 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3348 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3350 if (rc != ECORE_SUCCESS)
3353 *p_mcp_resp = out_params.mcp_resp;
3355 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3356 *p_resc_num = out_params.resc_num;
3357 *p_resc_start = out_params.resc_start;
3360 return ECORE_SUCCESS;
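/* Usage sketch (illustrative, not part of the driver): querying the MFW for
 * this PF's share of a resource. ECORE_SB is assumed here to be a valid
 * enum ecore_resources value for status blocks:
 *
 *	u32 mcp_resp = 0, num = 0, start = 0;
 *
 *	if (ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_SB, &mcp_resp,
 *				    &num, &start) == ECORE_SUCCESS &&
 *	    mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		DP_INFO(p_hwfn, "SBs: num %u start %u\n", num, start);
 */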
3363 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3364 struct ecore_ptt *p_ptt)
3366 u32 mcp_resp, mcp_param;
3368 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3369 &mcp_resp, &mcp_param);
3372 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3373 struct ecore_ptt *p_ptt,
3374 u32 param, u32 *p_mcp_resp,
3377 enum _ecore_status_t rc;
3379 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3380 p_mcp_resp, p_mcp_param);
3381 if (rc != ECORE_SUCCESS)
3384 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3386 "The resource command is unsupported by the MFW\n");
3387 return ECORE_NOTIMPL;
3390 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3391 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3393 DP_NOTICE(p_hwfn, false,
3394 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3402 enum _ecore_status_t
3403 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3404 struct ecore_resc_lock_params *p_params)
3406 u32 param = 0, mcp_resp, mcp_param;
3408 enum _ecore_status_t rc;
3410 switch (p_params->timeout) {
3411 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3412 opcode = RESOURCE_OPCODE_REQ;
3413 p_params->timeout = 0;
3415 case ECORE_MCP_RESC_LOCK_TO_NONE:
3416 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3417 p_params->timeout = 0;
3420 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3424 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3425 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3426 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3428 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3429 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3430 param, p_params->timeout, opcode, p_params->resource);
3432 /* Attempt to acquire the resource */
3433 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3435 if (rc != ECORE_SUCCESS)
3438 /* Analyze the response */
3439 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3440 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3442 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3443 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3444 mcp_param, opcode, p_params->owner);
3447 case RESOURCE_OPCODE_GNT:
3448 p_params->b_granted = true;
3450 case RESOURCE_OPCODE_BUSY:
3451 p_params->b_granted = false;
3454 DP_NOTICE(p_hwfn, false,
3455 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3460 return ECORE_SUCCESS;
3463 enum _ecore_status_t
3464 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3465 struct ecore_resc_lock_params *p_params)
3468 enum _ecore_status_t rc;
3471 /* No need for an interval before the first iteration */
3473 if (p_params->sleep_b4_retry) {
3474 u16 retry_interval_in_ms =
3475 DIV_ROUND_UP(p_params->retry_interval,
3478 OSAL_MSLEEP(retry_interval_in_ms);
3480 OSAL_UDELAY(p_params->retry_interval);
3484 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3485 if (rc != ECORE_SUCCESS)
3488 if (p_params->b_granted)
3490 } while (retry_cnt++ < p_params->retry_num);
3492 return ECORE_SUCCESS;
3496 ecore_mcp_resc_lock_default_init(struct ecore_hwfn *p_hwfn,
3497 struct ecore_resc_lock_params *p_lock,
3498 struct ecore_resc_unlock_params *p_unlock,
3499 enum ecore_resc_lock resource,
3500 bool b_is_permanent)
3502 if (p_lock != OSAL_NULL) {
3503 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
3505 /* Permanent resources don't require aging, and there's no
3506 * point in trying to acquire them more than once, since
3507 * another entity is not expected to release them.
3509 if (b_is_permanent) {
3510 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
3512 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3513 p_lock->retry_interval =
3514 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3515 p_lock->sleep_b4_retry = true;
3518 p_lock->resource = resource;
3521 if (p_unlock != OSAL_NULL) {
3522 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
3523 p_unlock->resource = resource;
3527 enum _ecore_status_t
3528 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3529 struct ecore_resc_unlock_params *p_params)
3531 u32 param = 0, mcp_resp, mcp_param;
3533 enum _ecore_status_t rc;
3535 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3536 : RESOURCE_OPCODE_RELEASE;
3537 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3538 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3540 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3541 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3542 param, opcode, p_params->resource);
3544 /* Attempt to release the resource */
3545 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3547 if (rc != ECORE_SUCCESS)
3550 /* Analyze the response */
3551 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3553 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3554 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3558 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3560 "Resource unlock request for an already released resource [%d]\n",
3561 p_params->resource);
3563 case RESOURCE_OPCODE_RELEASED:
3564 p_params->b_released = true;
3566 case RESOURCE_OPCODE_WRONG_OWNER:
3567 p_params->b_released = false;
3570 DP_NOTICE(p_hwfn, false,
3571 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3576 return ECORE_SUCCESS;
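/* Usage sketch (illustrative, not part of the driver): the typical flow is to
 * initialize the lock/unlock parameters with the default-init helper above,
 * acquire the resource, do the protected work, then release it. "resource"
 * stands for an enum ecore_resc_lock value owned by the caller, and
 * do_protected_work() is a placeholder for the caller's own flow:
 *
 *	struct ecore_resc_unlock_params unlock_params;
 *	struct ecore_resc_lock_params lock_params;
 *
 *	ecore_mcp_resc_lock_default_init(p_hwfn, &lock_params, &unlock_params,
 *					 resource, false);
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) ==
 *	    ECORE_SUCCESS && lock_params.b_granted) {
 *		do_protected_work(p_hwfn, p_ptt);
 *		(void)ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 *	}
 */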
3579 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
3581 return !!(p_hwfn->mcp_info->capabilities &
3582 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3585 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
3586 struct ecore_ptt *p_ptt)
3589 enum _ecore_status_t rc;
3591 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3592 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3593 if (rc == ECORE_SUCCESS)
3594 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
3595 "MFW supported features: %08x\n",
3596 p_hwfn->mcp_info->capabilities);
3601 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
3602 struct ecore_ptt *p_ptt)
3604 u32 mcp_resp, mcp_param, features;
3606 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
3607 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
3609 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3610 features, &mcp_resp, &mcp_param);