1e616adb09e3c26e08de78874678c94740c8cf82
[dpdk.git] / drivers / net / qede / base / ecore_mcp.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_vf.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23
24 #define CHIP_MCP_RESP_ITER_US 10
25 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
26
27 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)   /* Account for 5 sec */
28 #define ECORE_MCP_RESET_RETRIES (50 * 1000)     /* Account for 500 msec */
29
30 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
31         ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
32                  _val)
33
34 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
35         ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
36
37 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
38         DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
39                      OFFSETOF(struct public_drv_mb, _field), _val)
40
41 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
42         DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
43                      OFFSETOF(struct public_drv_mb, _field))
44
45 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
46         DRV_ID_PDA_COMP_VER_SHIFT)
47
48 #define MCP_BYTES_PER_MBIT_SHIFT 17
49
50 #ifndef ASIC_ONLY
51 static int loaded;
52 static int loaded_port[MAX_NUM_PORTS] = { 0 };
53 #endif
54
55 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
56 {
57         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
58                 return false;
59         return true;
60 }
61
/* Resolve and cache the per-port public section address for this
 * HW-function by reading the PUBLIC_PORT offset/size word from shared
 * memory and indexing it with the function's MFW port.
 */
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	/* Index the section by this function's port */
	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
74
/* Copy the current MFW message mailbox from device shared memory into the
 * driver's mfw_mb_cur buffer, converting each dword from big-endian to CPU
 * order.
 */
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	/* Nothing to read if no MFW was detected at init time */
	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		/* The first dword at mfw_mb_addr holds the mailbox length
		 * (see ecore_load_mcp_offsets), so data reads start one
		 * dword past it.
		 */
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}
98
99 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
100 {
101         if (p_hwfn->mcp_info) {
102                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
103                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
104                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
105         }
106         OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
107
108         return ECORE_SUCCESS;
109 }
110
/* Discover the MCP shared-memory layout for this PF: the public base, the
 * driver and MFW mailbox addresses, the MFW mailbox length, and the initial
 * driver/pulse sequence numbers.
 *
 * Returns ECORE_INVAL when no MFW is present (emulation, or the shared
 * memory address register reads zero); ECORE_SUCCESS otherwise.
 */
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	/* A zero shared-memory address means the MCP is not initialized */
	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address; its first dword holds the mailbox length */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					       p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
	    DRV_PULSE_SEQ_MASK;

	/* Snapshot the MCP POR counter so a later MCP reset can be detected
	 * (compared again in ecore_do_mcp_cmd)
	 */
	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					  MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
165
166 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
167                                         struct ecore_ptt *p_ptt)
168 {
169         struct ecore_mcp_info *p_info;
170         u32 size;
171
172         /* Allocate mcp_info structure */
173         p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
174                                        sizeof(*p_hwfn->mcp_info));
175         if (!p_hwfn->mcp_info)
176                 goto err;
177         p_info = p_hwfn->mcp_info;
178
179         if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
180                 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
181                 /* Do not free mcp_info here, since public_base indicate that
182                  * the MCP is not initialized
183                  */
184                 return ECORE_SUCCESS;
185         }
186
187         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
188         p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189         p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
190         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
191                 goto err;
192
193         /* Initialize the MFW spinlock */
194         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
195         OSAL_SPIN_LOCK_INIT(&p_info->lock);
196
197         return ECORE_SUCCESS;
198
199 err:
200         DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
201         ecore_mcp_free(p_hwfn);
202         return ECORE_NOMEM;
203 }
204
/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
 * access is achieved by setting a blocking flag, which will fail other
 * competing contexts to send their mailboxes.
 *
 * Returns ECORE_BUSY when another context has blocked mailbox sending,
 * ECORE_SUCCESS otherwise.
 */
static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
					      u32 cmd)
{
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	/* There's at least a single command that is sent by ecore during the
	 * load sequence [expectation of MFW].
	 */
	if ((p_hwfn->mcp_info->block_mb_sending) &&
	    (cmd != DRV_MSG_CODE_FEATURE_SUPPORT)) {
		DP_NOTICE(p_hwfn, false,
			  "Trying to send a MFW mailbox command [0x%x]"
			  " in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
		return ECORE_BUSY;
	}

	/* For [UN]LOAD_REQ the spinlock is dropped right away; mutual
	 * exclusion is maintained via block_mb_sending instead (see the
	 * comment above). ecore_mcp_mb_unlock() skips the unlock for these
	 * commands accordingly.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
	}

	return ECORE_SUCCESS;
}
248
249 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
250 {
251         if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
252                 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
253 }
254
/* Request an MCP reset via the driver mailbox and poll the
 * MISCS_REG_GENERIC_POR_0 counter until it changes, which indicates the
 * MCP went through reset. Returns ECORE_AGAIN on timeout.
 */
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the MFW up to 500 msec (50*1000 iterations of 10usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	/* A changed POR counter means the MCP completed its reset */
	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}
300
/* Low-level mailbox transaction: write the parameter and (cmd | seq) into
 * the driver mailbox, then poll the FW mailbox header until the sequence
 * number is echoed back. On success *o_mcp_resp holds the FW_MSG_CODE and
 * *o_mcp_param the FW parameter; on timeout returns ECORE_AGAIN and
 * notifies an MFW-response-failure HW error.
 */
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 cmd, u32 param,
					     u32 *o_mcp_resp,
					     u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 seq, cnt = 1, actual_mb_seq;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* Get actual driver mailbox sequence */
	/* NOTE(review): actual_mb_seq is read but never used below */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 sec (500*1000 iterations of 10usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < max_retries));

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}
	return rc;
}
365
/* Send a mailbox command that carries payload through the union data area
 * of the driver mailbox: the source buffer (if any) is copied to shared
 * memory before the command, and the destination buffer (if any) is filled
 * from shared memory afterwards. Access is serialized through
 * ecore_mcp_mb_lock()/ecore_mcp_mb_unlock().
 */
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	union drv_union_data union_data;
	u32 union_data_addr;
	enum _ecore_status_t rc;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
		return ECORE_BUSY;
	}

	/* Both payloads must fit in the mailbox union data area */
	if (p_mb_params->data_src_size > sizeof(union_data) ||
	    p_mb_params->data_dst_size > sizeof(union_data)) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       sizeof(union_data));
		return ECORE_INVAL;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* The full union is always written, so bytes beyond the source
	 * payload are zeroed rather than left stale
	 */
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			      p_mb_params->param, &p_mb_params->mcp_resp,
			      &p_mb_params->mcp_param);

	/* The copy-back is performed even if the command itself failed */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size)
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);

	ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}
420
421 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
422                                    struct ecore_ptt *p_ptt, u32 cmd, u32 param,
423                                    u32 *o_mcp_resp, u32 *o_mcp_param)
424 {
425         struct ecore_mcp_mb_params mb_params;
426         enum _ecore_status_t rc;
427
428 #ifndef ASIC_ONLY
429         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
430                 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
431                         loaded--;
432                         loaded_port[p_hwfn->port_id]--;
433                         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
434                                    loaded);
435                 }
436                 return ECORE_SUCCESS;
437         }
438 #endif
439
440         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
441         mb_params.cmd = cmd;
442         mb_params.param = param;
443         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
444         if (rc != ECORE_SUCCESS)
445                 return rc;
446
447         *o_mcp_resp = mb_params.mcp_resp;
448         *o_mcp_param = mb_params.mcp_param;
449
450         return ECORE_SUCCESS;
451 }
452
453 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
454                                           struct ecore_ptt *p_ptt,
455                                           u32 cmd,
456                                           u32 param,
457                                           u32 *o_mcp_resp,
458                                           u32 *o_mcp_param,
459                                           u32 i_txn_size, u32 *i_buf)
460 {
461         struct ecore_mcp_mb_params mb_params;
462         enum _ecore_status_t rc;
463
464         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
465         mb_params.cmd = cmd;
466         mb_params.param = param;
467         mb_params.p_data_src = i_buf;
468         mb_params.data_src_size = (u8)i_txn_size;
469         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
470         if (rc != ECORE_SUCCESS)
471                 return rc;
472
473         *o_mcp_resp = mb_params.mcp_resp;
474         *o_mcp_param = mb_params.mcp_param;
475
476         return ECORE_SUCCESS;
477 }
478
479 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
480                                           struct ecore_ptt *p_ptt,
481                                           u32 cmd,
482                                           u32 param,
483                                           u32 *o_mcp_resp,
484                                           u32 *o_mcp_param,
485                                           u32 *o_txn_size, u32 *o_buf)
486 {
487         struct ecore_mcp_mb_params mb_params;
488         u8 raw_data[MCP_DRV_NVM_BUF_LEN];
489         enum _ecore_status_t rc;
490
491         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
492         mb_params.cmd = cmd;
493         mb_params.param = param;
494         mb_params.p_data_dst = raw_data;
495
496         /* Use the maximal value since the actual one is part of the response */
497         mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
498
499         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
500         if (rc != ECORE_SUCCESS)
501                 return rc;
502
503         *o_mcp_resp = mb_params.mcp_resp;
504         *o_mcp_param = mb_params.mcp_param;
505
506         *o_txn_size = *o_mcp_param;
507         /* @DPDK */
508         OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
509
510         return ECORE_SUCCESS;
511 }
512
#ifndef ASIC_ONLY
/* Emulation-only substitute for the MFW load handshake: derive the load
 * phase (engine/port/function) from the file-scope 'loaded' and
 * 'loaded_port' counters, then increment them.
 */
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	/* Persists across calls; the first loader gets the ENGINE phase */
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif
540
541 static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
542 {
543         return (drv_role == DRV_ROLE_OS &&
544                 exist_drv_role == DRV_ROLE_PREBOOT) ||
545                (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
546 }
547
548 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
549                                                       struct ecore_ptt *p_ptt)
550 {
551         u32 resp = 0, param = 0;
552         enum _ecore_status_t rc;
553
554         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
555                            &resp, &param);
556         if (rc != ECORE_SUCCESS)
557                 DP_NOTICE(p_hwfn, false,
558                           "Failed to send cancel load request, rc = %d\n", rc);
559
560         return rc;
561 }
562
563 #define CONFIG_ECORE_L2_BITMAP_IDX      (0x1 << 0)
564 #define CONFIG_ECORE_SRIOV_BITMAP_IDX   (0x1 << 1)
565 #define CONFIG_ECORE_ROCE_BITMAP_IDX    (0x1 << 2)
566 #define CONFIG_ECORE_IWARP_BITMAP_IDX   (0x1 << 3)
567 #define CONFIG_ECORE_FCOE_BITMAP_IDX    (0x1 << 4)
568 #define CONFIG_ECORE_ISCSI_BITMAP_IDX   (0x1 << 5)
569 #define CONFIG_ECORE_LL2_BITMAP_IDX     (0x1 << 6)
570
/* Build a bitmap of the protocol/feature support compiled into this ecore
 * library; reported to the MFW as drv_ver_1 of the load request (see
 * ecore_mcp_load_req).
 */
static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}
599
/* Input parameters for building a LOAD_REQ mailbox command
 * (see __ecore_mcp_load_req)
 */
struct ecore_load_req_in_params {
	u8 hsi_ver;		/* Mailbox HSI version to request */
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;		/* DRV_ROLE_* encoding for the MFW */
	u8 timeout_val;
	u8 force_cmd;		/* LOAD_REQ_FORCE_* encoding for the MFW */
	bool avoid_eng_reset;
};
612
/* Values decoded from the MFW's LOAD_REQ response
 * (see __ecore_mcp_load_req)
 */
struct ecore_load_req_out_params {
	u32 load_code;		/* FW_MSG_CODE_* returned by the MFW */
	u32 exist_drv_ver_0;	/* Version info of the already-loaded driver */
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;	/* Another driver instance already exists */
};
622
/* Issue a single LOAD_REQ mailbox command: build the load_req_stc payload
 * from p_in_params, send it, and decode the MFW's load_rsp_stc into
 * p_out_params. The response details are decoded only when the new HSI is
 * in use and the MFW did not refuse due to HSI version 1.
 */
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
			    p_in_params->drv_role);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			    p_in_params->timeout_val);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			    p_in_params->force_cmd);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			    p_in_params->avoid_eng_reset);

	/* DEFAULT requests the current HSI version; otherwise encode the
	 * explicitly requested version into the param field
	 */
	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	/* The load_req_stc payload only applies to the newer HSI */
	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   ECORE_MFW_GET_FIELD(load_req.misc0,
					       LOAD_REQ_LOCK_TO),
			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   ECORE_MFW_GET_FIELD(load_req.misc0,
					       LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	/* Decode the load_rsp_stc only when the new HSI was used and the
	 * MFW did not refuse with an HSI-1 response
	 */
	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   ECORE_MFW_GET_FIELD(load_rsp.misc0,
					       LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}
715
716 static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
717                                                    enum ecore_drv_role drv_role,
718                                                    u8 *p_mfw_drv_role)
719 {
720         switch (drv_role) {
721         case ECORE_DRV_ROLE_OS:
722                 *p_mfw_drv_role = DRV_ROLE_OS;
723                 break;
724         case ECORE_DRV_ROLE_KDUMP:
725                 *p_mfw_drv_role = DRV_ROLE_KDUMP;
726                 break;
727         default:
728                 DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
729                 return ECORE_INVAL;
730         }
731
732         return ECORE_SUCCESS;
733 }
734
/* Force-load options, translated into the MFW LOAD_REQ_FORCE_* encoding by
 * ecore_get_mfw_force_cmd()
 */
enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};
740
741 static enum _ecore_status_t
742 ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
743                         enum ecore_load_req_force force_cmd,
744                         u8 *p_mfw_force_cmd)
745 {
746         switch (force_cmd) {
747         case ECORE_LOAD_REQ_FORCE_NONE:
748                 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
749                 break;
750         case ECORE_LOAD_REQ_FORCE_PF:
751                 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
752                 break;
753         case ECORE_LOAD_REQ_FORCE_ALL:
754                 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
755                 break;
756         default:
757                 DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
758                 return ECORE_INVAL;
759         }
760
761         return ECORE_SUCCESS;
762 }
763
/* Send a LOAD_REQ mailbox command to the MFW and resolve its response.
 * Depending on the first response, a revised request may be resent - either
 * using the old HSI (version 1) or carrying a force-load command. The final
 * load code is returned to the caller through p_params->load_code.
 */
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	/* Emulation has no functional MFW - derive the load code locally */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	/* Build the request: HSI/driver/FW versions, role, timeout, force */
	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	/* NOTE(review): "eocre" is a typo inherited from the helper's
	 * definition earlier in this file; renaming it requires a matching
	 * change at the definition site.
	 */
	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	/* The initial request never forces out other PFs */
	rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
				     &mfw_force_cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		/* Resend with FORCE_ALL only if our role permits it;
		 * otherwise cancel the pending request and bail out.
		 */
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role)) {
			DP_INFO(p_hwfn,
				"A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			rc = ecore_get_mfw_force_cmd(p_hwfn,
						     ECORE_LOAD_REQ_FORCE_ALL,
						     &mfw_force_cmd);
			if (rc != ECORE_SUCCESS)
				return rc;

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
		DP_NOTICE(p_hwfn, false,
			  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		break;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}
899
900 enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
901                                          struct ecore_ptt *p_ptt)
902 {
903         u32 resp = 0, param = 0;
904         enum _ecore_status_t rc;
905
906         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
907                            &param);
908         if (rc != ECORE_SUCCESS) {
909                 DP_NOTICE(p_hwfn, false,
910                           "Failed to send a LOAD_DONE command, rc = %d\n", rc);
911                 return rc;
912         }
913
914 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR     (1 << 0)
915
916         /* Check if there is a DID mismatch between nvm-cfg/efuse */
917         if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
918                 DP_NOTICE(p_hwfn, false,
919                           "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
920
921         return ECORE_SUCCESS;
922 }
923
924 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
925                                           struct ecore_ptt *p_ptt)
926 {
927         u32 wol_param, mcp_resp, mcp_param;
928
929         /* @DPDK */
930         wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
931
932         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
933                              &mcp_resp, &mcp_param);
934 }
935
936 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
937                                            struct ecore_ptt *p_ptt)
938 {
939         struct ecore_mcp_mb_params mb_params;
940         struct mcp_mac wol_mac;
941
942         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
943         mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
944
945         return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
946 }
947
/* Handle a VF-disabled (FLR) notification from the MFW: read the bitmap of
 * disabled VFs from the path section of the shmem, mark them via the IOV
 * layer, and kick the OSAL FLR update when there is something to handle.
 */
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	/* One bit per VF, 32 VFs per dword */
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	/* Read the mcp_vf_disabled bitmap dword by dword */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	/* Notify the OSAL layer only when ecore_iov_mark_vf_flr() reports
	 * that some VF actually needs FLR handling.
	 */
	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}
978
/* Acknowledge towards the MFW that the driver finished handling FLR for the
 * VFs in the vfs_to_ack bitmap (one bit per VF, VF_MAX_STATIC bits total),
 * then clear the corresponding ACK bits in the shmem function section.
 */
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	/* The entire bitmap is sent as the mailbox data payload */
	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
				     &mb_params);
	if (rc != ECORE_SUCCESS) {
		/* NOTE(review): any mailbox failure is reported to the caller
		 * as ECORE_TIMEOUT, masking the original error code.
		 */
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}
1018
1019 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1020                                                 struct ecore_ptt *p_ptt)
1021 {
1022         u32 transceiver_state;
1023
1024         transceiver_state = ecore_rd(p_hwfn, p_ptt,
1025                                      p_hwfn->mcp_info->port_addr +
1026                                      OFFSETOF(struct public_port,
1027                                               transceiver_data));
1028
1029         DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1030                    "Received transceiver state update [0x%08x] from mfw"
1031                    " [Addr 0x%x]\n",
1032                    transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1033                                             OFFSETOF(struct public_port,
1034                                                      transceiver_data)));
1035
1036         transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
1037
1038         if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1039                 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1040         else
1041                 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1042 }
1043
/* Process a link notification from the MFW and refresh the cached link state
 * (mcp_info->link_output); when b_reset is set, only clear the local link
 * indications and return. Bandwidth limits are re-applied on every update
 * and the upper layer is notified via OSAL_LINK_UPDATE at the end.
 */
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					  OFFSETOF(struct public_port,
						   link_status)));
	} else {
		/* Leave the zeroed link state; skip decoding and notification */
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	/* Report link-up only after the driver has initialized the link */
	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	/* Decode speed and duplex from the status dword */
	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* line_speed keeps the total line speed; p_link->speed may later be
	 * changed according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	/* Autoneg indications */
	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					 LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	/* Link-partner advertised speed abilities */
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	/* Link-partner flow-control abilities */
	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	OSAL_LINK_UPDATE(p_hwfn);
}
1180
/* Configure the link in the MFW according to mcp_info->link_input: sends
 * INIT_PHY when b_up, LINK_RESET otherwise, and resets the local link state
 * on a link-down request.
 */
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	/* Emulation has no MFW to configure */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	/* A forced speed is only meaningful when autoneg is disabled */
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;
	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	/* The phy configuration is the payload of the mailbox command */
	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return rc;
}
1233
/* Read the process-kill counter from the path section of the MFW shmem.
 *
 * NOTE(review): the return type is u32, but for a VF the function returns
 * ECORE_INVAL cast through u32 - callers cannot distinguish this from a
 * genuine counter value.
 */
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	/* Only the counter field of the process_kill dword is relevant */
	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
	    PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}
1255
1256 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1257                                           struct ecore_ptt *p_ptt)
1258 {
1259         struct ecore_dev *p_dev = p_hwfn->p_dev;
1260         u32 proc_kill_cnt;
1261
1262         /* Prevent possible attentions/interrupts during the recovery handling
1263          * and till its load phase, during which they will be re-enabled.
1264          */
1265         ecore_int_igu_disable_int(p_hwfn, p_ptt);
1266
1267         DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1268
1269         /* The following operations should be done once, and thus in CMT mode
1270          * are carried out by only the first HW function.
1271          */
1272         if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1273                 return;
1274
1275         if (p_dev->recov_in_prog) {
1276                 DP_NOTICE(p_hwfn, false,
1277                           "Ignoring the indication since a recovery"
1278                           " process is already in progress\n");
1279                 return;
1280         }
1281
1282         p_dev->recov_in_prog = true;
1283
1284         proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1285         DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1286
1287         OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1288 }
1289
1290 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1291                                           struct ecore_ptt *p_ptt,
1292                                           enum MFW_DRV_MSG_TYPE type)
1293 {
1294         enum ecore_mcp_protocol_type stats_type;
1295         union ecore_mcp_protocol_stats stats;
1296         struct ecore_mcp_mb_params mb_params;
1297         u32 hsi_param;
1298         enum _ecore_status_t rc;
1299
1300         switch (type) {
1301         case MFW_DRV_MSG_GET_LAN_STATS:
1302                 stats_type = ECORE_MCP_LAN_STATS;
1303                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1304                 break;
1305         default:
1306                 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
1307                 return;
1308         }
1309
1310         OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1311
1312         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1313         mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1314         mb_params.param = hsi_param;
1315         mb_params.p_data_src = &stats;
1316         mb_params.data_src_size = sizeof(stats);
1317         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1318         if (rc != ECORE_SUCCESS)
1319                 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1320 }
1321
/* Extract this PF's min/max bandwidth limits from the shmem function
 * configuration and cache them in mcp_info->func_info, clamping
 * out-of-range values into [1, 100].
 */
static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
	 * limit and correct value to min `1' and max `100' if limit isn't in
	 * range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
	    FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
	    FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
1355
1356 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1357                                     struct ecore_ptt *p_ptt,
1358                                     struct public_func *p_data,
1359                                     int pfid)
1360 {
1361         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1362                                         PUBLIC_FUNC);
1363         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1364         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1365         u32 i, size;
1366
1367         OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1368
1369         size = OSAL_MIN_T(u32, sizeof(*p_data),
1370                           SECTION_SIZE(mfw_path_offsize));
1371         for (i = 0; i < size / sizeof(u32); i++)
1372                 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1373                                               func_addr + (i << 2));
1374
1375         return size;
1376 }
1377
1378 static void
1379 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1380 {
1381         struct ecore_mcp_function_info *p_info;
1382         struct public_func shmem_info;
1383         u32 resp = 0, param = 0;
1384
1385         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1386
1387         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1388
1389         p_info = &p_hwfn->mcp_info->func_info;
1390
1391         ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1392
1393         ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1394
1395         /* Acknowledge the MFW */
1396         ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1397                       &param);
1398 }
1399
1400 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1401                                          struct ecore_ptt *p_ptt)
1402 {
1403         /* A single notification should be sent to upper driver in CMT mode */
1404         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1405                 return;
1406
1407         DP_NOTICE(p_hwfn, false,
1408                   "Fan failure was detected on the network interface card"
1409                   " and it's going to be shut down.\n");
1410
1411         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1412 }
1413
/* Parameters for a single mdump command sent via ecore_mcp_mdump_cmd() */
struct ecore_mdump_cmd_params {
	u32 cmd;		/* DRV_MSG_CODE_MDUMP_* sub-command */
	void *p_data_src;	/* Optional payload sent with the command */
	u8 data_src_size;	/* Payload size in bytes */
	void *p_data_dst;	/* Optional buffer for the command's output */
	u8 data_dst_size;	/* Output buffer size in bytes */
	u32 mcp_resp;		/* MFW response code, filled on return */
};
1422
1423 static enum _ecore_status_t
1424 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1425                     struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1426 {
1427         struct ecore_mcp_mb_params mb_params;
1428         enum _ecore_status_t rc;
1429
1430         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1431         mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1432         mb_params.param = p_mdump_cmd_params->cmd;
1433         mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1434         mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1435         mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1436         mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1437         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1438         if (rc != ECORE_SUCCESS)
1439                 return rc;
1440
1441         p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1442         if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1443                 DP_NOTICE(p_hwfn, false,
1444                           "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
1445                           p_mdump_cmd_params->cmd);
1446                 rc = ECORE_INVAL;
1447         }
1448
1449         return rc;
1450 }
1451
1452 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1453                                                 struct ecore_ptt *p_ptt)
1454 {
1455         struct ecore_mdump_cmd_params mdump_cmd_params;
1456
1457         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1458         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1459
1460         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1461 }
1462
1463 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1464                                                 struct ecore_ptt *p_ptt,
1465                                                 u32 epoch)
1466 {
1467         struct ecore_mdump_cmd_params mdump_cmd_params;
1468
1469         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1470         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1471         mdump_cmd_params.p_data_src = &epoch;
1472         mdump_cmd_params.data_src_size = sizeof(epoch);
1473
1474         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1475 }
1476
1477 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1478                                              struct ecore_ptt *p_ptt)
1479 {
1480         struct ecore_mdump_cmd_params mdump_cmd_params;
1481
1482         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1483         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1484
1485         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1486 }
1487
1488 static enum _ecore_status_t
1489 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1490                            struct mdump_config_stc *p_mdump_config)
1491 {
1492         struct ecore_mdump_cmd_params mdump_cmd_params;
1493         enum _ecore_status_t rc;
1494
1495         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1496         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1497         mdump_cmd_params.p_data_dst = p_mdump_config;
1498         mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1499
1500         rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1501         if (rc != ECORE_SUCCESS)
1502                 return rc;
1503
1504         if (mdump_cmd_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1505                 DP_INFO(p_hwfn,
1506                         "The mdump command is not supported by the MFW\n");
1507                 return ECORE_NOTIMPL;
1508         }
1509
1510         if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1511                 DP_NOTICE(p_hwfn, false,
1512                           "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1513                           mdump_cmd_params.mcp_resp);
1514                 rc = ECORE_UNKNOWN_ERROR;
1515         }
1516
1517         return rc;
1518 }
1519
/* Fill @p_mdump_info with the mdump state kept by the MFW: the reason for
 * the last crash dump (read from the PUBLIC_GLOBAL shmem section) and, when
 * a dump actually exists, its configuration/log details.
 */
enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mdump_info *p_mdump_info)
{
	u32 addr, global_offsize, global_addr;
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

	/* Locate the PUBLIC_GLOBAL shmem section and read the mdump reason */
	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	global_addr = SECTION_ADDR(global_offsize, 0);
	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
					global_addr +
					OFFSETOF(struct public_global,
						 mdump_reason));

	/* A non-zero reason indicates a dump exists - fetch its details */
	if (p_mdump_info->reason) {
		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_mdump_info->version = mdump_config.version;
		p_mdump_info->config = mdump_config.config;
		p_mdump_info->epoch = mdump_config.epoc;
		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
		p_mdump_info->valid_logs = mdump_config.valid_logs;

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
			   p_mdump_info->reason, p_mdump_info->version,
			   p_mdump_info->config, p_mdump_info->epoch,
			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
	}

	return ECORE_SUCCESS;
}
1562
1563 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1564                                                 struct ecore_ptt *p_ptt)
1565 {
1566         struct ecore_mdump_cmd_params mdump_cmd_params;
1567
1568         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1569         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1570
1571         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1572 }
1573
1574 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1575                                             struct ecore_ptt *p_ptt)
1576 {
1577         /* In CMT mode - no need for more than a single acknowledgment to the
1578          * MFW, and no more than a single notification to the upper driver.
1579          */
1580         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1581                 return;
1582
1583         DP_NOTICE(p_hwfn, false,
1584                   "Received a critical error notification from the MFW!\n");
1585
1586         if (p_hwfn->p_dev->allow_mdump) {
1587                 DP_NOTICE(p_hwfn, false,
1588                           "Not acknowledging the notification to allow the MFW crash dump\n");
1589                 return;
1590         }
1591
1592         ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1593         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1594 }
1595
/* Process pending MFW -> driver mailbox messages.
 * Reads the MFW mailbox, compares it against the driver's shadow copy,
 * dispatches a handler for every changed entry, ACKs all messages back to
 * the MFW (in big-endian, as it expects) and refreshes the shadow copy.
 * Returns ECORE_INVAL if an unknown message was found, or if the indication
 * arrived without any new message.
 */
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		/* The mailbox index identifies the event type */
		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
1689
/* Retrieve the running MFW version and, optionally, the running bundle id.
 * VFs take the version from their ACQUIRE response; PFs read it from the
 * PUBLIC_GLOBAL section of the management FW's shmem.
 */
enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   u32 *p_mfw_ver,
					   u32 *p_running_bundle_id)
{
	u32 global_offsize;

#ifndef ASIC_ONLY
	/* Emulation has no MFW; report success without touching outputs */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
		return ECORE_SUCCESS;
	}
#endif

	if (IS_VF(p_hwfn->p_dev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			/* The PF reported its MFW version during ACQUIRE */
			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return ECORE_SUCCESS;
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return ECORE_INVAL;
		}
	}

	global_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
						       public_base,
						       PUBLIC_GLOBAL));
	*p_mfw_ver =
	    ecore_rd(p_hwfn, p_ptt,
		     SECTION_ADDR(global_offsize,
				  0) + OFFSETOF(struct public_global, mfw_ver));

	if (p_running_bundle_id != OSAL_NULL) {
		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
						SECTION_ADDR(global_offsize,
							     0) +
						OFFSETOF(struct public_global,
							 running_bundle_id));
	}

	return ECORE_SUCCESS;
}
1737
1738 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1739                                               u32 *p_media_type)
1740 {
1741         struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1742         struct ecore_ptt *p_ptt;
1743
1744         /* TODO - Add support for VFs */
1745         if (IS_VF(p_dev))
1746                 return ECORE_INVAL;
1747
1748         if (!ecore_mcp_is_init(p_hwfn)) {
1749                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
1750                 return ECORE_BUSY;
1751         }
1752
1753         *p_media_type = MEDIA_UNSPECIFIED;
1754
1755         p_ptt = ecore_ptt_acquire(p_hwfn);
1756         if (!p_ptt)
1757                 return ECORE_BUSY;
1758
1759         *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1760                                  OFFSETOF(struct public_port, media_type));
1761
1762         ecore_ptt_release(p_hwfn, p_ptt);
1763
1764         return ECORE_SUCCESS;
1765 }
1766
1767 /* @DPDK */
1768 /* Old MFW has a global configuration for all PFs regarding RDMA support */
1769 static void
1770 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
1771                                  enum ecore_pci_personality *p_proto)
1772 {
1773         *p_proto = ECORE_PCI_ETH;
1774
1775         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1776                    "According to Legacy capabilities, L2 personality is %08x\n",
1777                    (u32)*p_proto);
1778 }
1779
1780 /* @DPDK */
1781 static enum _ecore_status_t
1782 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
1783                               struct ecore_ptt *p_ptt,
1784                               enum ecore_pci_personality *p_proto)
1785 {
1786         u32 resp = 0, param = 0;
1787         enum _ecore_status_t rc;
1788
1789         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1790                    "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
1791                    (u32)*p_proto, resp, param);
1792         return ECORE_SUCCESS;
1793 }
1794
1795 static enum _ecore_status_t
1796 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1797                           struct public_func *p_info,
1798                           struct ecore_ptt *p_ptt,
1799                           enum ecore_pci_personality *p_proto)
1800 {
1801         enum _ecore_status_t rc = ECORE_SUCCESS;
1802
1803         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1804         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1805                 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
1806                     ECORE_SUCCESS)
1807                         ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
1808                 break;
1809         default:
1810                 rc = ECORE_INVAL;
1811         }
1812
1813         return rc;
1814 }
1815
1816 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1817                                                     struct ecore_ptt *p_ptt)
1818 {
1819         struct ecore_mcp_function_info *info;
1820         struct public_func shmem_info;
1821
1822         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1823         info = &p_hwfn->mcp_info->func_info;
1824
1825         info->pause_on_host = (shmem_info.config &
1826                                FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1827
1828         if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1829                                       &info->protocol)) {
1830                 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1831                        (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1832                 return ECORE_INVAL;
1833         }
1834
1835         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1836
1837         if (shmem_info.mac_upper || shmem_info.mac_lower) {
1838                 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1839                 info->mac[1] = (u8)(shmem_info.mac_upper);
1840                 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1841                 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1842                 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1843                 info->mac[5] = (u8)(shmem_info.mac_lower);
1844         } else {
1845                 /* TODO - are there protocols for which there's no MAC? */
1846                 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1847         }
1848
1849         /* TODO - are these calculations true for BE machine? */
1850         info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
1851                          (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
1852         info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
1853                          (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
1854
1855         info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1856
1857         info->mtu = (u16)shmem_info.mtu_size;
1858
1859         if (info->mtu == 0)
1860                 info->mtu = 1500;
1861
1862         info->mtu = (u16)shmem_info.mtu_size;
1863
1864         DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1865                    "Read configuration from shmem: pause_on_host %02x"
1866                     " protocol %02x BW [%02x - %02x]"
1867                     " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
1868                     " node %lx ovlan %04x\n",
1869                    info->pause_on_host, info->protocol,
1870                    info->bandwidth_min, info->bandwidth_max,
1871                    info->mac[0], info->mac[1], info->mac[2],
1872                    info->mac[3], info->mac[4], info->mac[5],
1873                    (unsigned long)info->wwn_port,
1874                    (unsigned long)info->wwn_node, info->ovlan);
1875
1876         return ECORE_SUCCESS;
1877 }
1878
1879 struct ecore_mcp_link_params
1880 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1881 {
1882         if (!p_hwfn || !p_hwfn->mcp_info)
1883                 return OSAL_NULL;
1884         return &p_hwfn->mcp_info->link_input;
1885 }
1886
1887 struct ecore_mcp_link_state
1888 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1889 {
1890         if (!p_hwfn || !p_hwfn->mcp_info)
1891                 return OSAL_NULL;
1892
1893 #ifndef ASIC_ONLY
1894         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1895                 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1896                 p_hwfn->mcp_info->link_output.link_up = true;
1897         }
1898 #endif
1899
1900         return &p_hwfn->mcp_info->link_output;
1901 }
1902
1903 struct ecore_mcp_link_capabilities
1904 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1905 {
1906         if (!p_hwfn || !p_hwfn->mcp_info)
1907                 return OSAL_NULL;
1908         return &p_hwfn->mcp_info->link_capabilities;
1909 }
1910
/* Request a NIG drain from the MFW and wait for it to complete. */
enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	/* 1000 is the drain duration passed to the MFW - presumably in msec,
	 * given the 1020 msec sleep below; TODO confirm against MFW spec.
	 */
	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	OSAL_MSLEEP(1020);

	return rc;
}
1925
1926 const struct ecore_mcp_function_info
1927 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1928 {
1929         if (!p_hwfn || !p_hwfn->mcp_info)
1930                 return OSAL_NULL;
1931         return &p_hwfn->mcp_info->func_info;
1932 }
1933
1934 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1935                                            struct ecore_ptt *p_ptt,
1936                                            struct ecore_mcp_nvm_params *params)
1937 {
1938         enum _ecore_status_t rc;
1939
1940         switch (params->type) {
1941         case ECORE_MCP_NVM_RD:
1942                 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1943                                           params->nvm_common.offset,
1944                                           &params->nvm_common.resp,
1945                                           &params->nvm_common.param,
1946                                           params->nvm_rd.buf_size,
1947                                           params->nvm_rd.buf);
1948                 break;
1949         case ECORE_MCP_CMD:
1950                 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1951                                    params->nvm_common.offset,
1952                                    &params->nvm_common.resp,
1953                                    &params->nvm_common.param);
1954                 break;
1955         case ECORE_MCP_NVM_WR:
1956                 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1957                                           params->nvm_common.offset,
1958                                           &params->nvm_common.resp,
1959                                           &params->nvm_common.param,
1960                                           params->nvm_wr.buf_size,
1961                                           params->nvm_wr.buf);
1962                 break;
1963         default:
1964                 rc = ECORE_NOTIMPL;
1965                 break;
1966         }
1967         return rc;
1968 }
1969
1970 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1971                                   struct ecore_ptt *p_ptt, u32 personalities)
1972 {
1973         enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1974         struct public_func shmem_info;
1975         int i, count = 0, num_pfs;
1976
1977         num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1978
1979         for (i = 0; i < num_pfs; i++) {
1980                 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1981                                          MCP_PF_ID_BY_REL(p_hwfn, i));
1982                 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1983                         continue;
1984
1985                 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1986                                               &protocol) !=
1987                     ECORE_SUCCESS)
1988                         continue;
1989
1990                 if ((1 << ((u32)protocol)) & personalities)
1991                         count++;
1992         }
1993
1994         return count;
1995 }
1996
1997 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1998                                               struct ecore_ptt *p_ptt,
1999                                               u32 *p_flash_size)
2000 {
2001         u32 flash_size;
2002
2003 #ifndef ASIC_ONLY
2004         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2005                 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2006                 return ECORE_INVAL;
2007         }
2008 #endif
2009
2010         if (IS_VF(p_hwfn->p_dev))
2011                 return ECORE_INVAL;
2012
2013         flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2014         flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2015             MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2016         flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2017
2018         *p_flash_size = flash_size;
2019
2020         return ECORE_SUCCESS;
2021 }
2022
2023 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2024                                                   struct ecore_ptt *p_ptt)
2025 {
2026         struct ecore_dev *p_dev = p_hwfn->p_dev;
2027
2028         if (p_dev->recov_in_prog) {
2029                 DP_NOTICE(p_hwfn, false,
2030                           "Avoid triggering a recovery since such a process"
2031                           " is already in progress\n");
2032                 return ECORE_AGAIN;
2033         }
2034
2035         DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2036         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2037
2038         return ECORE_SUCCESS;
2039 }
2040
/* Ask the MFW to allocate @num MSI-X vectors (status blocks) for @vf_id.
 * Returns ECORE_INVAL if the MFW does not confirm the configuration.
 */
enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	enum _ecore_status_t rc;

/* Only Leader can configure MSIX, and need to take CMT into account */

	if (!IS_LEAD_HWFN(p_hwfn))
		return ECORE_SUCCESS;
	/* Scale the request over all hw-functions of the device (CMT) */
	num *= p_hwfn->p_dev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
	    DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
	    DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			   &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
			  vf_id);
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			    num, vf_id);
	}

	return rc;
}
2074
/* Report the driver version (numeric value + name string) to the MFW via
 * the SET_VERSION mailbox command.  The name string is packed into
 * big-endian u32 words, as the MFW expects.
 */
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	struct ecore_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	u32 num_words, i;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	/* Nothing to report on emulation/FPGA platforms */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
	drv_version.version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		/* The driver name is expected to be in a big-endian format */
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
2111
/* Request the MFW to halt the MCP processor (paired with ecore_mcp_resume). */
enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
2125
2126 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2127                                       struct ecore_ptt *p_ptt)
2128 {
2129         u32 value, cpu_mode;
2130
2131         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2132
2133         value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2134         value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2135         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2136         cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2137
2138         return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
2139 }
2140
2141 enum _ecore_status_t
2142 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2143                                    struct ecore_ptt *p_ptt,
2144                                    enum ecore_ov_client client)
2145 {
2146         enum _ecore_status_t rc;
2147         u32 resp = 0, param = 0;
2148         u32 drv_mb_param;
2149
2150         switch (client) {
2151         case ECORE_OV_CLIENT_DRV:
2152                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2153                 break;
2154         case ECORE_OV_CLIENT_USER:
2155                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2156                 break;
2157         case ECORE_OV_CLIENT_VENDOR_SPEC:
2158                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2159                 break;
2160         default:
2161                 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2162                 return ECORE_INVAL;
2163         }
2164
2165         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2166                            drv_mb_param, &resp, &param);
2167         if (rc != ECORE_SUCCESS)
2168                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2169
2170         return rc;
2171 }
2172
2173 enum _ecore_status_t
2174 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2175                                  struct ecore_ptt *p_ptt,
2176                                  enum ecore_ov_driver_state drv_state)
2177 {
2178         enum _ecore_status_t rc;
2179         u32 resp = 0, param = 0;
2180         u32 drv_mb_param;
2181
2182         switch (drv_state) {
2183         case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2184                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2185                 break;
2186         case ECORE_OV_DRIVER_STATE_DISABLED:
2187                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2188                 break;
2189         case ECORE_OV_DRIVER_STATE_ACTIVE:
2190                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2191                 break;
2192         default:
2193                 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2194                 return ECORE_INVAL;
2195         }
2196
2197         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2198                            drv_mb_param, &resp, &param);
2199         if (rc != ECORE_SUCCESS)
2200                 DP_ERR(p_hwfn, "Failed to send driver state\n");
2201
2202         return rc;
2203 }
2204
/* @DPDK stub - FC NPIV tables are not supported; reports success without
 * filling @p_table.
 */
enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return 0;
}
2211
/* @DPDK stub - the MTU is not reported to the MFW; reports success
 * without issuing any mailbox command.
 */
enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return 0;
}
2218
2219 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2220                                        struct ecore_ptt *p_ptt,
2221                                        enum ecore_led_mode mode)
2222 {
2223         u32 resp = 0, param = 0, drv_mb_param;
2224         enum _ecore_status_t rc;
2225
2226         switch (mode) {
2227         case ECORE_LED_MODE_ON:
2228                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2229                 break;
2230         case ECORE_LED_MODE_OFF:
2231                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2232                 break;
2233         case ECORE_LED_MODE_RESTORE:
2234                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2235                 break;
2236         default:
2237                 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2238                 return ECORE_INVAL;
2239         }
2240
2241         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2242                            drv_mb_param, &resp, &param);
2243         if (rc != ECORE_SUCCESS)
2244                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2245
2246         return rc;
2247 }
2248
2249 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2250                                              struct ecore_ptt *p_ptt,
2251                                              u32 mask_parities)
2252 {
2253         enum _ecore_status_t rc;
2254         u32 resp = 0, param = 0;
2255
2256         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2257                            mask_parities, &resp, &param);
2258
2259         if (rc != ECORE_SUCCESS) {
2260                 DP_ERR(p_hwfn,
2261                        "MCP response failure for mask parities, aborting\n");
2262         } else if (resp != FW_MSG_CODE_OK) {
2263                 DP_ERR(p_hwfn,
2264                        "MCP did not ack mask parity request. Old MFW?\n");
2265                 rc = ECORE_INVAL;
2266         }
2267
2268         return rc;
2269 }
2270
2271 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2272                                         u8 *p_buf, u32 len)
2273 {
2274         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2275         u32 bytes_left, offset, bytes_to_copy, buf_size;
2276         struct ecore_mcp_nvm_params params;
2277         struct ecore_ptt *p_ptt;
2278         enum _ecore_status_t rc = ECORE_SUCCESS;
2279
2280         p_ptt = ecore_ptt_acquire(p_hwfn);
2281         if (!p_ptt)
2282                 return ECORE_BUSY;
2283
2284         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2285         bytes_left = len;
2286         offset = 0;
2287         params.type = ECORE_MCP_NVM_RD;
2288         params.nvm_rd.buf_size = &buf_size;
2289         params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
2290         while (bytes_left > 0) {
2291                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2292                                            MCP_DRV_NVM_BUF_LEN);
2293                 params.nvm_common.offset = (addr + offset) |
2294                     (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
2295                 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2296                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2297                 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
2298                                             FW_MSG_CODE_NVM_OK)) {
2299                         DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2300                         break;
2301                 }
2302
2303                 /* This can be a lengthy process, and it's possible scheduler
2304                  * isn't preemptible. Sleep a bit to prevent CPU hogging.
2305                  */
2306                 if (bytes_left % 0x1000 <
2307                     (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
2308                         OSAL_MSLEEP(1);
2309
2310                 offset += *params.nvm_rd.buf_size;
2311                 bytes_left -= *params.nvm_rd.buf_size;
2312         }
2313
2314         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2315         ecore_ptt_release(p_hwfn, p_ptt);
2316
2317         return rc;
2318 }
2319
2320 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2321                                         u32 addr, u8 *p_buf, u32 len)
2322 {
2323         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2324         struct ecore_mcp_nvm_params params;
2325         struct ecore_ptt *p_ptt;
2326         enum _ecore_status_t rc;
2327
2328         p_ptt = ecore_ptt_acquire(p_hwfn);
2329         if (!p_ptt)
2330                 return ECORE_BUSY;
2331
2332         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2333         params.type = ECORE_MCP_NVM_RD;
2334         params.nvm_rd.buf_size = &len;
2335         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2336             DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
2337         params.nvm_common.offset = addr;
2338         params.nvm_rd.buf = (u32 *)p_buf;
2339         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2340         if (rc != ECORE_SUCCESS)
2341                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2342
2343         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2344         ecore_ptt_release(p_hwfn, p_ptt);
2345
2346         return rc;
2347 }
2348
2349 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2350 {
2351         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2352         struct ecore_mcp_nvm_params params;
2353         struct ecore_ptt *p_ptt;
2354
2355         p_ptt = ecore_ptt_acquire(p_hwfn);
2356         if (!p_ptt)
2357                 return ECORE_BUSY;
2358
2359         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2360         OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2361         ecore_ptt_release(p_hwfn, p_ptt);
2362
2363         return ECORE_SUCCESS;
2364 }
2365
2366 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2367 {
2368         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2369         struct ecore_mcp_nvm_params params;
2370         struct ecore_ptt *p_ptt;
2371         enum _ecore_status_t rc;
2372
2373         p_ptt = ecore_ptt_acquire(p_hwfn);
2374         if (!p_ptt)
2375                 return ECORE_BUSY;
2376         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2377         params.type = ECORE_MCP_CMD;
2378         params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
2379         params.nvm_common.offset = addr;
2380         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2381         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2382         ecore_ptt_release(p_hwfn, p_ptt);
2383
2384         return rc;
2385 }
2386
2387 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2388                                                   u32 addr)
2389 {
2390         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2391         struct ecore_mcp_nvm_params params;
2392         struct ecore_ptt *p_ptt;
2393         enum _ecore_status_t rc;
2394
2395         p_ptt = ecore_ptt_acquire(p_hwfn);
2396         if (!p_ptt)
2397                 return ECORE_BUSY;
2398         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2399         params.type = ECORE_MCP_CMD;
2400         params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2401         params.nvm_common.offset = addr;
2402         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2403         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2404         ecore_ptt_release(p_hwfn, p_ptt);
2405
2406         return rc;
2407 }
2408
2409 /* rc receives ECORE_INVAL as default parameter because
2410  * it might not enter the while loop if the len is 0
2411  */
2412 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2413                                          u32 addr, u8 *p_buf, u32 len)
2414 {
2415         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2416         enum _ecore_status_t rc = ECORE_INVAL;
2417         struct ecore_mcp_nvm_params params;
2418         struct ecore_ptt *p_ptt;
2419         u32 buf_idx, buf_size;
2420
2421         p_ptt = ecore_ptt_acquire(p_hwfn);
2422         if (!p_ptt)
2423                 return ECORE_BUSY;
2424
2425         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2426         params.type = ECORE_MCP_NVM_WR;
2427         if (cmd == ECORE_PUT_FILE_DATA)
2428                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2429         else
2430                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2431         buf_idx = 0;
2432         while (buf_idx < len) {
2433                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2434                                       MCP_DRV_NVM_BUF_LEN);
2435                 params.nvm_common.offset = ((buf_size <<
2436                                              DRV_MB_PARAM_NVM_LEN_SHIFT)
2437                                             | addr) + buf_idx;
2438                 params.nvm_wr.buf_size = buf_size;
2439                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2440                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2441                 if (rc != ECORE_SUCCESS ||
2442                     ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
2443                      (params.nvm_common.resp !=
2444                       FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
2445                         DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2446
2447                 /* This can be a lengthy process, and it's possible scheduler
2448                  * isn't preemptible. Sleep a bit to prevent CPU hogging.
2449                  */
2450                 if (buf_idx % 0x1000 >
2451                     (buf_idx + buf_size) % 0x1000)
2452                         OSAL_MSLEEP(1);
2453
2454                 buf_idx += buf_size;
2455         }
2456
2457         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2458         ecore_ptt_release(p_hwfn, p_ptt);
2459
2460         return rc;
2461 }
2462
2463 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2464                                          u32 addr, u8 *p_buf, u32 len)
2465 {
2466         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2467         struct ecore_mcp_nvm_params params;
2468         struct ecore_ptt *p_ptt;
2469         enum _ecore_status_t rc;
2470
2471         p_ptt = ecore_ptt_acquire(p_hwfn);
2472         if (!p_ptt)
2473                 return ECORE_BUSY;
2474
2475         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2476         params.type = ECORE_MCP_NVM_WR;
2477         params.nvm_wr.buf_size = len;
2478         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
2479             DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
2480         params.nvm_common.offset = addr;
2481         params.nvm_wr.buf = (u32 *)p_buf;
2482         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2483         if (rc != ECORE_SUCCESS)
2484                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2485         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2486         ecore_ptt_release(p_hwfn, p_ptt);
2487
2488         return rc;
2489 }
2490
2491 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2492                                                    u32 addr)
2493 {
2494         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2495         struct ecore_mcp_nvm_params params;
2496         struct ecore_ptt *p_ptt;
2497         enum _ecore_status_t rc;
2498
2499         p_ptt = ecore_ptt_acquire(p_hwfn);
2500         if (!p_ptt)
2501                 return ECORE_BUSY;
2502
2503         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2504         params.type = ECORE_MCP_CMD;
2505         params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
2506         params.nvm_common.offset = addr;
2507         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2508         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2509         ecore_ptt_release(p_hwfn, p_ptt);
2510
2511         return rc;
2512 }
2513
2514 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2515                                             struct ecore_ptt *p_ptt,
2516                                             u32 port, u32 addr, u32 offset,
2517                                             u32 len, u8 *p_buf)
2518 {
2519         struct ecore_mcp_nvm_params params;
2520         enum _ecore_status_t rc;
2521         u32 bytes_left, bytes_to_copy, buf_size;
2522
2523         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2524         params.nvm_common.offset =
2525                 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2526                 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2527         addr = offset;
2528         offset = 0;
2529         bytes_left = len;
2530         params.type = ECORE_MCP_NVM_RD;
2531         params.nvm_rd.buf_size = &buf_size;
2532         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
2533         while (bytes_left > 0) {
2534                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2535                                            MAX_I2C_TRANSACTION_SIZE);
2536                 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2537                 params.nvm_common.offset &=
2538                         (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2539                          DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2540                 params.nvm_common.offset |=
2541                         ((addr + offset) <<
2542                          DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2543                 params.nvm_common.offset |=
2544                         (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2545                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2546                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2547                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2548                         return ECORE_NODEV;
2549                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2550                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2551                         return ECORE_UNKNOWN_ERROR;
2552
2553                 offset += *params.nvm_rd.buf_size;
2554                 bytes_left -= *params.nvm_rd.buf_size;
2555         }
2556
2557         return ECORE_SUCCESS;
2558 }
2559
2560 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2561                                              struct ecore_ptt *p_ptt,
2562                                              u32 port, u32 addr, u32 offset,
2563                                              u32 len, u8 *p_buf)
2564 {
2565         struct ecore_mcp_nvm_params params;
2566         enum _ecore_status_t rc;
2567         u32 buf_idx, buf_size;
2568
2569         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2570         params.nvm_common.offset =
2571                 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2572                 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2573         params.type = ECORE_MCP_NVM_WR;
2574         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
2575         buf_idx = 0;
2576         while (buf_idx < len) {
2577                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2578                                       MAX_I2C_TRANSACTION_SIZE);
2579                 params.nvm_common.offset &=
2580                         (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2581                          DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2582                 params.nvm_common.offset |=
2583                         ((offset + buf_idx) <<
2584                          DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2585                 params.nvm_common.offset |=
2586                         (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2587                 params.nvm_wr.buf_size = buf_size;
2588                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2589                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2590                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2591                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2592                         return ECORE_NODEV;
2593                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2594                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2595                         return ECORE_UNKNOWN_ERROR;
2596
2597                 buf_idx += buf_size;
2598         }
2599
2600         return ECORE_SUCCESS;
2601 }
2602
2603 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2604                                          struct ecore_ptt *p_ptt,
2605                                          u16 gpio, u32 *gpio_val)
2606 {
2607         enum _ecore_status_t rc = ECORE_SUCCESS;
2608         u32 drv_mb_param = 0, rsp;
2609
2610         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
2611
2612         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
2613                            drv_mb_param, &rsp, gpio_val);
2614
2615         if (rc != ECORE_SUCCESS)
2616                 return rc;
2617
2618         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2619                 return ECORE_UNKNOWN_ERROR;
2620
2621         return ECORE_SUCCESS;
2622 }
2623
2624 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2625                                           struct ecore_ptt *p_ptt,
2626                                           u16 gpio, u16 gpio_val)
2627 {
2628         enum _ecore_status_t rc = ECORE_SUCCESS;
2629         u32 drv_mb_param = 0, param, rsp;
2630
2631         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
2632                 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
2633
2634         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2635                            drv_mb_param, &rsp, &param);
2636
2637         if (rc != ECORE_SUCCESS)
2638                 return rc;
2639
2640         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2641                 return ECORE_UNKNOWN_ERROR;
2642
2643         return ECORE_SUCCESS;
2644 }
2645
2646 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2647                                          struct ecore_ptt *p_ptt,
2648                                          u16 gpio, u32 *gpio_direction,
2649                                          u32 *gpio_ctrl)
2650 {
2651         u32 drv_mb_param = 0, rsp, val = 0;
2652         enum _ecore_status_t rc = ECORE_SUCCESS;
2653
2654         drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
2655
2656         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2657                            drv_mb_param, &rsp, &val);
2658         if (rc != ECORE_SUCCESS)
2659                 return rc;
2660
2661         *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2662                            DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
2663         *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2664                       DRV_MB_PARAM_GPIO_CTRL_SHIFT;
2665
2666         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2667                 return ECORE_UNKNOWN_ERROR;
2668
2669         return ECORE_SUCCESS;
2670 }
2671
2672 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2673                                                   struct ecore_ptt *p_ptt)
2674 {
2675         u32 drv_mb_param = 0, rsp, param;
2676         enum _ecore_status_t rc = ECORE_SUCCESS;
2677
2678         drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2679                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2680
2681         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2682                            drv_mb_param, &rsp, &param);
2683
2684         if (rc != ECORE_SUCCESS)
2685                 return rc;
2686
2687         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2688             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2689                 rc = ECORE_UNKNOWN_ERROR;
2690
2691         return rc;
2692 }
2693
2694 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2695                                                struct ecore_ptt *p_ptt)
2696 {
2697         u32 drv_mb_param, rsp, param;
2698         enum _ecore_status_t rc = ECORE_SUCCESS;
2699
2700         drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
2701                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2702
2703         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2704                            drv_mb_param, &rsp, &param);
2705
2706         if (rc != ECORE_SUCCESS)
2707                 return rc;
2708
2709         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2710             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2711                 rc = ECORE_UNKNOWN_ERROR;
2712
2713         return rc;
2714 }
2715
2716 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
2717         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
2718 {
2719         u32 drv_mb_param = 0, rsp;
2720         enum _ecore_status_t rc = ECORE_SUCCESS;
2721
2722         drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
2723                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2724
2725         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2726                            drv_mb_param, &rsp, num_images);
2727
2728         if (rc != ECORE_SUCCESS)
2729                 return rc;
2730
2731         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
2732                 rc = ECORE_UNKNOWN_ERROR;
2733
2734         return rc;
2735 }
2736
2737 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
2738         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2739         struct bist_nvm_image_att *p_image_att, u32 image_index)
2740 {
2741         struct ecore_mcp_nvm_params params;
2742         enum _ecore_status_t rc;
2743         u32 buf_size;
2744
2745         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2746         params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
2747                                     DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2748         params.nvm_common.offset |= (image_index <<
2749                                     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
2750
2751         params.type = ECORE_MCP_NVM_RD;
2752         params.nvm_rd.buf_size = &buf_size;
2753         params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
2754         params.nvm_rd.buf = (u32 *)p_image_att;
2755
2756         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2757         if (rc != ECORE_SUCCESS)
2758                 return rc;
2759
2760         if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2761             (p_image_att->return_code != 1))
2762                 rc = ECORE_UNKNOWN_ERROR;
2763
2764         return rc;
2765 }
2766
2767 enum _ecore_status_t
2768 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
2769                                struct ecore_ptt *p_ptt,
2770                                struct ecore_temperature_info *p_temp_info)
2771 {
2772         struct ecore_temperature_sensor *p_temp_sensor;
2773         struct temperature_status_stc mfw_temp_info;
2774         struct ecore_mcp_mb_params mb_params;
2775         u32 val;
2776         enum _ecore_status_t rc;
2777         u8 i;
2778
2779         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2780         mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
2781         mb_params.p_data_dst = &mfw_temp_info;
2782         mb_params.data_dst_size = sizeof(mfw_temp_info);
2783         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2784         if (rc != ECORE_SUCCESS)
2785                 return rc;
2786
2787         OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
2788         p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
2789                                               ECORE_MAX_NUM_OF_SENSORS);
2790         for (i = 0; i < p_temp_info->num_sensors; i++) {
2791                 val = mfw_temp_info.sensor[i];
2792                 p_temp_sensor = &p_temp_info->sensors[i];
2793                 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
2794                                                  SENSOR_LOCATION_SHIFT;
2795                 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
2796                                                 THRESHOLD_HIGH_SHIFT;
2797                 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
2798                                           CRITICAL_TEMPERATURE_SHIFT;
2799                 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
2800                                               CURRENT_TEMP_SHIFT;
2801         }
2802
2803         return ECORE_SUCCESS;
2804 }
2805
2806 enum _ecore_status_t ecore_mcp_get_mba_versions(
2807         struct ecore_hwfn *p_hwfn,
2808         struct ecore_ptt *p_ptt,
2809         struct ecore_mba_vers *p_mba_vers)
2810 {
2811         struct ecore_mcp_nvm_params params;
2812         enum _ecore_status_t rc;
2813         u32 buf_size;
2814
2815         OSAL_MEM_ZERO(&params, sizeof(params));
2816         params.type = ECORE_MCP_NVM_RD;
2817         params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
2818         params.nvm_common.offset = 0;
2819         params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
2820         params.nvm_rd.buf_size = &buf_size;
2821         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2822
2823         if (rc != ECORE_SUCCESS)
2824                 return rc;
2825
2826         if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2827             FW_MSG_CODE_NVM_OK)
2828                 rc = ECORE_UNKNOWN_ERROR;
2829
2830         if (buf_size != MCP_DRV_NVM_BUF_LEN)
2831                 rc = ECORE_UNKNOWN_ERROR;
2832
2833         return rc;
2834 }
2835
/* Retrieve the memory ECC event count from the MFW.
 * NOTE(review): @num_events is a u64 but the mailbox writes only 32 bits
 * through the cast below; the upper half of *num_events appears to be
 * left untouched - callers presumably zero it first. TODO confirm
 * against the MFW interface definition.
 */
enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt,
                                              u64 *num_events)
{
        u32 rsp;

        return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
                             0, &rsp, (u32 *)num_events);
}
2845
2846 static enum resource_id_enum
2847 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
2848 {
2849         enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
2850
2851         switch (res_id) {
2852         case ECORE_SB:
2853                 mfw_res_id = RESOURCE_NUM_SB_E;
2854                 break;
2855         case ECORE_L2_QUEUE:
2856                 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
2857                 break;
2858         case ECORE_VPORT:
2859                 mfw_res_id = RESOURCE_NUM_VPORT_E;
2860                 break;
2861         case ECORE_RSS_ENG:
2862                 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
2863                 break;
2864         case ECORE_PQ:
2865                 mfw_res_id = RESOURCE_NUM_PQ_E;
2866                 break;
2867         case ECORE_RL:
2868                 mfw_res_id = RESOURCE_NUM_RL_E;
2869                 break;
2870         case ECORE_MAC:
2871         case ECORE_VLAN:
2872                 /* Each VFC resource can accommodate both a MAC and a VLAN */
2873                 mfw_res_id = RESOURCE_VFC_FILTER_E;
2874                 break;
2875         case ECORE_ILT:
2876                 mfw_res_id = RESOURCE_ILT_E;
2877                 break;
2878         case ECORE_LL2_QUEUE:
2879                 mfw_res_id = RESOURCE_LL2_QUEUE_E;
2880                 break;
2881         case ECORE_RDMA_CNQ_RAM:
2882         case ECORE_CMDQS_CQS:
2883                 /* CNQ/CMDQS are the same resource */
2884                 mfw_res_id = RESOURCE_CQS_E;
2885                 break;
2886         case ECORE_RDMA_STATS_QUEUE:
2887                 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
2888                 break;
2889         case ECORE_BDQ:
2890                 mfw_res_id = RESOURCE_BDQ_E;
2891                 break;
2892         default:
2893                 break;
2894         }
2895
2896         return mfw_res_id;
2897 }
2898
2899 #define ECORE_RESC_ALLOC_VERSION_MAJOR  2
2900 #define ECORE_RESC_ALLOC_VERSION_MINOR  0
2901 #define ECORE_RESC_ALLOC_VERSION                                \
2902         ((ECORE_RESC_ALLOC_VERSION_MAJOR <<                     \
2903           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |    \
2904          (ECORE_RESC_ALLOC_VERSION_MINOR <<                     \
2905           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
2906
/* Input for a resource-allocation mailbox exchange */
struct ecore_resc_alloc_in_params {
        u32 cmd;                /* DRV_MSG_{GET,SET}_RESOURCE_* opcode */
        enum ecore_resources res_id;
        u32 resc_max_val;       /* used only by the SET command */
};
2912
/* Output of a resource-allocation mailbox exchange, unpacked from the
 * MFW's resource_info response.
 */
struct ecore_resc_alloc_out_params {
        u32 mcp_resp;           /* raw mailbox response code */
        u32 mcp_param;          /* raw mailbox param (carries MFW HSI version) */
        u32 resc_num;
        u32 resc_start;
        u32 vf_resc_num;
        u32 vf_resc_start;
        u32 flags;
};
2922
/* Delay before disabling the PF, letting in-flight PCIe traffic drain */
#define ECORE_RECOVERY_PROLOG_SLEEP_MS  100

/* Prepare the device for a recovery flow: wait for outstanding PCIe
 * transactions, then clear the PF's FID_enable bit in the PXP so further
 * accesses from this PF are blocked. Failure is logged but still
 * returned to the caller.
 */
enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
{
        struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
        struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
        enum _ecore_status_t rc;

        /* Allow ongoing PCIe transactions to complete */
        OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);

        /* Clear the PF's internal FID_enable in the PXP */
        rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
        if (rc != ECORE_SUCCESS)
                DP_NOTICE(p_hwfn, false,
                          "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
                          rc);

        return rc;
}
2943
/* Perform a GET/SET resource-allocation exchange with the MFW: translate
 * the ecore resource id, send the resource_info struct, and unpack the
 * MFW's in-place response into @p_out_params.
 */
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              struct ecore_resc_alloc_in_params *p_in_params,
                              struct ecore_resc_alloc_out_params *p_out_params)
{
        struct ecore_mcp_mb_params mb_params;
        struct resource_info mfw_resc_info;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));

        mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
        if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
                DP_ERR(p_hwfn,
                       "Failed to match resource %d [%s] with the MFW resources\n",
                       p_in_params->res_id,
                       ecore_hw_get_resc_name(p_in_params->res_id));
                return ECORE_INVAL;
        }

        switch (p_in_params->cmd) {
        case DRV_MSG_SET_RESOURCE_VALUE_MSG:
                /* Only the SET command carries a value to the MFW */
                mfw_resc_info.size = p_in_params->resc_max_val;
                /* Fallthrough */
        case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
                break;
        default:
                DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
                       p_in_params->cmd);
                return ECORE_INVAL;
        }

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = p_in_params->cmd;
        mb_params.param = ECORE_RESC_ALLOC_VERSION;
        mb_params.p_data_src = &mfw_resc_info;
        mb_params.data_src_size = sizeof(mfw_resc_info);
        /* The same buffer serves both directions - the MFW overwrites it
         * in place with its response.
         */
        mb_params.p_data_dst = mb_params.p_data_src;
        mb_params.data_dst_size = mb_params.data_src_size;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
                   p_in_params->cmd, p_in_params->res_id,
                   ecore_hw_get_resc_name(p_in_params->res_id),
                   ECORE_MFW_GET_FIELD(mb_params.param,
                           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
                   ECORE_MFW_GET_FIELD(mb_params.param,
                           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
                   p_in_params->resc_max_val);

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Unpack the MFW's in-place response */
        p_out_params->mcp_resp = mb_params.mcp_resp;
        p_out_params->mcp_param = mb_params.mcp_param;
        p_out_params->resc_num = mfw_resc_info.size;
        p_out_params->resc_start = mfw_resc_info.offset;
        p_out_params->vf_resc_num = mfw_resc_info.vf_size;
        p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
        p_out_params->flags = mfw_resc_info.flags;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
                   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
                           FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
                   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
                           FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
                   p_out_params->resc_num, p_out_params->resc_start,
                   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
                   p_out_params->flags);

        return ECORE_SUCCESS;
}
3019
3020 enum _ecore_status_t
3021 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3022                            enum ecore_resources res_id, u32 resc_max_val,
3023                            u32 *p_mcp_resp)
3024 {
3025         struct ecore_resc_alloc_out_params out_params;
3026         struct ecore_resc_alloc_in_params in_params;
3027         enum _ecore_status_t rc;
3028
3029         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3030         in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3031         in_params.res_id = res_id;
3032         in_params.resc_max_val = resc_max_val;
3033         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3034         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3035                                            &out_params);
3036         if (rc != ECORE_SUCCESS)
3037                 return rc;
3038
3039         *p_mcp_resp = out_params.mcp_resp;
3040
3041         return ECORE_SUCCESS;
3042 }
3043
3044 enum _ecore_status_t
3045 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3046                         enum ecore_resources res_id, u32 *p_mcp_resp,
3047                         u32 *p_resc_num, u32 *p_resc_start)
3048 {
3049         struct ecore_resc_alloc_out_params out_params;
3050         struct ecore_resc_alloc_in_params in_params;
3051         enum _ecore_status_t rc;
3052
3053         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3054         in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3055         in_params.res_id = res_id;
3056         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3057         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3058                                            &out_params);
3059         if (rc != ECORE_SUCCESS)
3060                 return rc;
3061
3062         *p_mcp_resp = out_params.mcp_resp;
3063
3064         if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3065                 *p_resc_num = out_params.resc_num;
3066                 *p_resc_start = out_params.resc_start;
3067         }
3068
3069         return ECORE_SUCCESS;
3070 }
3071
3072 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3073                                                struct ecore_ptt *p_ptt)
3074 {
3075         u32 mcp_resp, mcp_param;
3076
3077         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3078                              &mcp_resp, &mcp_param);
3079 }
3080
3081 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3082                                                    struct ecore_ptt *p_ptt,
3083                                                    u32 param, u32 *p_mcp_resp,
3084                                                    u32 *p_mcp_param)
3085 {
3086         enum _ecore_status_t rc;
3087
3088         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3089                            p_mcp_resp, p_mcp_param);
3090         if (rc != ECORE_SUCCESS)
3091                 return rc;
3092
3093         if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3094                 DP_INFO(p_hwfn,
3095                         "The resource command is unsupported by the MFW\n");
3096                 return ECORE_NOTIMPL;
3097         }
3098
3099         if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3100                 u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3101
3102                 DP_NOTICE(p_hwfn, false,
3103                           "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3104                           param, opcode);
3105                 return ECORE_INVAL;
3106         }
3107
3108         return rc;
3109 }
3110
3111 enum _ecore_status_t
3112 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3113                       struct ecore_resc_lock_params *p_params)
3114 {
3115         u32 param = 0, mcp_resp, mcp_param;
3116         u8 opcode;
3117         enum _ecore_status_t rc;
3118
3119         switch (p_params->timeout) {
3120         case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3121                 opcode = RESOURCE_OPCODE_REQ;
3122                 p_params->timeout = 0;
3123                 break;
3124         case ECORE_MCP_RESC_LOCK_TO_NONE:
3125                 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3126                 p_params->timeout = 0;
3127                 break;
3128         default:
3129                 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3130                 break;
3131         }
3132
3133         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3134         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3135         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3136
3137         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3138                    "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3139                    param, p_params->timeout, opcode, p_params->resource);
3140
3141         /* Attempt to acquire the resource */
3142         rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3143                                     &mcp_param);
3144         if (rc != ECORE_SUCCESS)
3145                 return rc;
3146
3147         /* Analyze the response */
3148         p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
3149                                              RESOURCE_CMD_RSP_OWNER);
3150         opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3151
3152         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3153                    "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3154                    mcp_param, opcode, p_params->owner);
3155
3156         switch (opcode) {
3157         case RESOURCE_OPCODE_GNT:
3158                 p_params->b_granted = true;
3159                 break;
3160         case RESOURCE_OPCODE_BUSY:
3161                 p_params->b_granted = false;
3162                 break;
3163         default:
3164                 DP_NOTICE(p_hwfn, false,
3165                           "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3166                           mcp_param, opcode);
3167                 return ECORE_INVAL;
3168         }
3169
3170         return ECORE_SUCCESS;
3171 }
3172
3173 enum _ecore_status_t
3174 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3175                     struct ecore_resc_lock_params *p_params)
3176 {
3177         u32 retry_cnt = 0;
3178         enum _ecore_status_t rc;
3179
3180         do {
3181                 /* No need for an interval before the first iteration */
3182                 if (retry_cnt) {
3183                         if (p_params->sleep_b4_retry) {
3184                                 u16 retry_interval_in_ms =
3185                                         DIV_ROUND_UP(p_params->retry_interval,
3186                                                      1000);
3187
3188                                 OSAL_MSLEEP(retry_interval_in_ms);
3189                         } else {
3190                                 OSAL_UDELAY(p_params->retry_interval);
3191                         }
3192                 }
3193
3194                 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3195                 if (rc != ECORE_SUCCESS)
3196                         return rc;
3197
3198                 if (p_params->b_granted)
3199                         break;
3200         } while (retry_cnt++ < p_params->retry_num);
3201
3202         return ECORE_SUCCESS;
3203 }
3204
3205 enum _ecore_status_t
3206 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3207                       struct ecore_resc_unlock_params *p_params)
3208 {
3209         u32 param = 0, mcp_resp, mcp_param;
3210         u8 opcode;
3211         enum _ecore_status_t rc;
3212
3213         opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3214                                    : RESOURCE_OPCODE_RELEASE;
3215         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3216         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3217
3218         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3219                    "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3220                    param, opcode, p_params->resource);
3221
3222         /* Attempt to release the resource */
3223         rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3224                                     &mcp_param);
3225         if (rc != ECORE_SUCCESS)
3226                 return rc;
3227
3228         /* Analyze the response */
3229         opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3230
3231         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3232                    "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3233                    mcp_param, opcode);
3234
3235         switch (opcode) {
3236         case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3237                 DP_INFO(p_hwfn,
3238                         "Resource unlock request for an already released resource [%d]\n",
3239                         p_params->resource);
3240                 /* Fallthrough */
3241         case RESOURCE_OPCODE_RELEASED:
3242                 p_params->b_released = true;
3243                 break;
3244         case RESOURCE_OPCODE_WRONG_OWNER:
3245                 p_params->b_released = false;
3246                 break;
3247         default:
3248                 DP_NOTICE(p_hwfn, false,
3249                           "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3250                           mcp_param, opcode);
3251                 return ECORE_INVAL;
3252         }
3253
3254         return ECORE_SUCCESS;
3255 }
3256
3257 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
3258 {
3259         return !!(p_hwfn->mcp_info->capabilities &
3260                   FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3261 }
3262
3263 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
3264                                                 struct ecore_ptt *p_ptt)
3265 {
3266         u32 mcp_resp;
3267         enum _ecore_status_t rc;
3268
3269         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3270                            0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3271         if (rc == ECORE_SUCCESS)
3272                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
3273                            "MFW supported features: %08x\n",
3274                            p_hwfn->mcp_info->capabilities);
3275
3276         return rc;
3277 }
3278
3279 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
3280                                                 struct ecore_ptt *p_ptt)
3281 {
3282         u32 mcp_resp, mcp_param, features;
3283
3284         features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ;
3285
3286         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3287                              features, &mcp_resp, &mcp_param);
3288 }