net/qede/base: support previous driver unload
[dpdk.git] / drivers / net / qede / base / ecore_mcp.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_vf.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23
/* Mailbox polling interval, in usec: real silicon vs emulation */
#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */

/* Access a dword of this PF's mailbox area; '_ptr' selects which cached
 * base address in mcp_info to use (e.g. drv_mb_addr), '_offset' is the
 * byte offset of the field within that area.
 */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
36
/* Write @_val into the given public_drv_mb field of this PF's driver
 * mailbox.
 * Bug fix: forward the macro's own '_p_hwfn' argument instead of the
 * literal name 'p_hwfn', which only worked because every call site
 * happened to have a variable of that exact name in scope.
 */
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)
40
/* Read the given public_drv_mb field from this PF's driver mailbox */
#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

/* PDA compatibility word (FW major/minor) packed into the LOAD_REQ param */
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
	DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

#ifndef ASIC_ONLY
/* Emulation-only bookkeeping of how many functions/ports have loaded,
 * used to mimic the MFW's load-phase arbitration (no MFW on emulation).
 */
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif
54
55 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
56 {
57         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
58                 return false;
59         return true;
60 }
61
62 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
63 {
64         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
65                                         PUBLIC_PORT);
66         u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
67
68         p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
69                                                    MFW_PORT(p_hwfn));
70         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
71                    "port_addr = 0x%x, port_id 0x%02x\n",
72                    p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
73 }
74
75 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
76 {
77         u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
78         OSAL_BE32 tmp;
79         u32 i;
80
81 #ifndef ASIC_ONLY
82         if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
83                 return;
84 #endif
85
86         if (!p_hwfn->mcp_info->public_base)
87                 return;
88
89         for (i = 0; i < length; i++) {
90                 tmp = ecore_rd(p_hwfn, p_ptt,
91                                p_hwfn->mcp_info->mfw_mb_addr +
92                                (i << 2) + sizeof(u32));
93
94                 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
95                     OSAL_BE32_TO_CPU(tmp);
96         }
97 }
98
99 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
100 {
101         if (p_hwfn->mcp_info) {
102                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
103                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
104                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
105         }
106         OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
107
108         return ECORE_SUCCESS;
109 }
110
/* Read the MFW shared-memory layout and cache in mcp_info the per-PF
 * mailbox addresses, the MFW mailbox length, the initial driver-mailbox
 * and FW-pulse sequence numbers, and the MCP generation counter.
 *
 * Returns ECORE_INVAL when no MFW is present (public_base reads zero,
 * or emulation), ECORE_SUCCESS otherwise.
 */
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		/* Zero public_base makes ecore_mcp_is_init() report false */
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address; its first dword holds the MB length */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					       p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
	    DRV_PULSE_SEQ_MASK;

	/* Remember the MCP generation; a later mismatch in this register
	 * indicates the MCP was reset and offsets must be re-read.
	 */
	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					  MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
165
166 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
167                                         struct ecore_ptt *p_ptt)
168 {
169         struct ecore_mcp_info *p_info;
170         u32 size;
171
172         /* Allocate mcp_info structure */
173         p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
174                                        sizeof(*p_hwfn->mcp_info));
175         if (!p_hwfn->mcp_info)
176                 goto err;
177         p_info = p_hwfn->mcp_info;
178
179         if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
180                 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
181                 /* Do not free mcp_info here, since public_base indicate that
182                  * the MCP is not initialized
183                  */
184                 return ECORE_SUCCESS;
185         }
186
187         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
188         p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189         p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
190         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
191                 goto err;
192
193         /* Initialize the MFW spinlock */
194         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
195         OSAL_SPIN_LOCK_INIT(&p_info->lock);
196
197         return ECORE_SUCCESS;
198
199 err:
200         DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
201         ecore_mcp_free(p_hwfn);
202         return ECORE_NOMEM;
203 }
204
205 /* Locks the MFW mailbox of a PF to ensure a single access.
206  * The lock is achieved in most cases by holding a spinlock, causing other
207  * threads to wait till a previous access is done.
208  * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
209  * access is achieved by setting a blocking flag, which will fail other
210  * competing contexts to send their mailboxes.
211  */
212 static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
213                                               u32 cmd)
214 {
215         OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
216
217         /* The spinlock shouldn't be acquired when the mailbox command is
218          * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
219          * pending [UN]LOAD_REQ command of another PF together with a spinlock
220          * (i.e. interrupts are disabled) - can lead to a deadlock.
221          * It is assumed that for a single PF, no other mailbox commands can be
222          * sent from another context while sending LOAD_REQ, and that any
223          * parallel commands to UNLOAD_REQ can be cancelled.
224          */
225         if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
226                 p_hwfn->mcp_info->block_mb_sending = false;
227
228         if (p_hwfn->mcp_info->block_mb_sending) {
229                 DP_NOTICE(p_hwfn, false,
230                           "Trying to send a MFW mailbox command [0x%x]"
231                           " in parallel to [UN]LOAD_REQ. Aborting.\n",
232                           cmd);
233                 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
234                 return ECORE_BUSY;
235         }
236
237         if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
238                 p_hwfn->mcp_info->block_mb_sending = true;
239                 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
240         }
241
242         return ECORE_SUCCESS;
243 }
244
245 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
246 {
247         if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
248                 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
249 }
250
251 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
252                                      struct ecore_ptt *p_ptt)
253 {
254         u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
255         u32 delay = CHIP_MCP_RESP_ITER_US;
256         u32 org_mcp_reset_seq, cnt = 0;
257         enum _ecore_status_t rc = ECORE_SUCCESS;
258
259 #ifndef ASIC_ONLY
260         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
261                 delay = EMUL_MCP_RESP_ITER_US;
262 #endif
263
264         /* Ensure that only a single thread is accessing the mailbox at a
265          * certain time.
266          */
267         rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
268         if (rc != ECORE_SUCCESS)
269                 return rc;
270
271         /* Set drv command along with the updated sequence */
272         org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
273         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
274
275         do {
276                 /* Wait for MFW response */
277                 OSAL_UDELAY(delay);
278                 /* Give the FW up to 500 second (50*1000*10usec) */
279         } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
280                                                 MISCS_REG_GENERIC_POR_0)) &&
281                  (cnt++ < ECORE_MCP_RESET_RETRIES));
282
283         if (org_mcp_reset_seq !=
284             ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
285                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
286                            "MCP was reset after %d usec\n", cnt * delay);
287         } else {
288                 DP_ERR(p_hwfn, "Failed to reset MCP\n");
289                 rc = ECORE_AGAIN;
290         }
291
292         ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
293
294         return rc;
295 }
296
/* Issue one mailbox command to the MFW and busy-wait for the reply.
 * On success, *o_mcp_resp holds the FW response code (sequence stripped)
 * and *o_mcp_param the FW response parameter. On timeout, returns
 * ECORE_AGAIN, zeroes *o_mcp_resp and notifies the HW-error layer.
 * Caller must already hold the mailbox lock (ecore_mcp_mb_lock).
 */
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt,
                                             u32 cmd, u32 param,
                                             u32 *o_mcp_resp,
                                             u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 seq, cnt = 1, actual_mb_seq;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* Get actual driver mailbox sequence.
	 * NOTE(review): actual_mb_seq is read but never used afterwards;
	 * the read is kept in case the register access itself matters -
	 * confirm before removing.
	 */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now; if so, the shmem offsets must be re-read.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence; this write is
	 * what actually triggers the MFW.
	 */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < max_retries));

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		/* Strip the sequence, keep the response code only */
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}
	return rc;
}
361
/* Full mailbox transaction: optionally stage a payload into the shmem
 * union_data area, issue the command, then optionally copy the response
 * payload back out. Serialized via ecore_mcp_mb_lock()/unlock().
 *
 * Returns ECORE_BUSY when no MFW is present or the mailbox is blocked,
 * otherwise the result of ecore_do_mcp_cmd().
 */
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct ecore_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;
	enum _ecore_status_t rc;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
		return ECORE_BUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Stage the request payload, if any */
	if (p_mb_params->p_data_src != OSAL_NULL)
		ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
				p_mb_params->p_data_src,
				sizeof(*p_mb_params->p_data_src));

	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			      p_mb_params->param, &p_mb_params->mcp_resp,
			      &p_mb_params->mcp_param);

	/* Copy the response payload out, if requested; note this happens
	 * even when the command itself failed.
	 */
	if (p_mb_params->p_data_dst != OSAL_NULL)
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr,
				  sizeof(*p_mb_params->p_data_dst));

	ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}
404
405 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
406                                    struct ecore_ptt *p_ptt, u32 cmd, u32 param,
407                                    u32 *o_mcp_resp, u32 *o_mcp_param)
408 {
409         struct ecore_mcp_mb_params mb_params;
410         enum _ecore_status_t rc;
411
412 #ifndef ASIC_ONLY
413         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
414                 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
415                         loaded--;
416                         loaded_port[p_hwfn->port_id]--;
417                         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
418                                    loaded);
419                 }
420                 return ECORE_SUCCESS;
421         }
422 #endif
423
424         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
425         mb_params.cmd = cmd;
426         mb_params.param = param;
427         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
428         if (rc != ECORE_SUCCESS)
429                 return rc;
430
431         *o_mcp_resp = mb_params.mcp_resp;
432         *o_mcp_param = mb_params.mcp_param;
433
434         return ECORE_SUCCESS;
435 }
436
437 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
438                                           struct ecore_ptt *p_ptt,
439                                           u32 cmd,
440                                           u32 param,
441                                           u32 *o_mcp_resp,
442                                           u32 *o_mcp_param,
443                                           u32 i_txn_size, u32 *i_buf)
444 {
445         struct ecore_mcp_mb_params mb_params;
446         union drv_union_data union_data;
447         enum _ecore_status_t rc;
448
449         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
450         mb_params.cmd = cmd;
451         mb_params.param = param;
452         OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
453         mb_params.p_data_src = &union_data;
454         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
455         if (rc != ECORE_SUCCESS)
456                 return rc;
457
458         *o_mcp_resp = mb_params.mcp_resp;
459         *o_mcp_param = mb_params.mcp_param;
460
461         return ECORE_SUCCESS;
462 }
463
464 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
465                                           struct ecore_ptt *p_ptt,
466                                           u32 cmd,
467                                           u32 param,
468                                           u32 *o_mcp_resp,
469                                           u32 *o_mcp_param,
470                                           u32 *o_txn_size, u32 *o_buf)
471 {
472         struct ecore_mcp_mb_params mb_params;
473         union drv_union_data union_data;
474         enum _ecore_status_t rc;
475
476         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
477         mb_params.cmd = cmd;
478         mb_params.param = param;
479         mb_params.p_data_dst = &union_data;
480         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
481         if (rc != ECORE_SUCCESS)
482                 return rc;
483
484         *o_mcp_resp = mb_params.mcp_resp;
485         *o_mcp_param = mb_params.mcp_param;
486
487         *o_txn_size = *o_mcp_param;
488         OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
489
490         return ECORE_SUCCESS;
491 }
492
493 #ifndef ASIC_ONLY
/* Emulation-only stand-in for the MFW's load arbitration: derive the
 * load phase (engine/port/function) from the file-scope 'loaded' and
 * 'loaded_port' counters, report it via *p_load_code, and bump both
 * counters.
 */
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
                                    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	/* First function overall -> engine load; first on its port ->
	 * port load; otherwise function-level load.
	 */
	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
519 #endif
520
521 static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
522 {
523         return (drv_role == DRV_ROLE_OS &&
524                 exist_drv_role == DRV_ROLE_PREBOOT) ||
525                (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
526 }
527
528 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
529                                                       struct ecore_ptt *p_ptt)
530 {
531         u32 resp = 0, param = 0;
532         enum _ecore_status_t rc;
533
534         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
535                            &resp, &param);
536         if (rc != ECORE_SUCCESS)
537                 DP_NOTICE(p_hwfn, false,
538                           "Failed to send cancel load request, rc = %d\n", rc);
539
540         return rc;
541 }
542
/* Bit positions of the compile-time protocol features, reported to the
 * MFW as part of the load request (drv_ver_1).
 */
#define CONFIG_ECORE_L2_BITMAP_IDX      (0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX   (0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX    (0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX   (0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX    (0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX   (0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX     (0x1 << 6)

/* Build a bitmap of the protocol features this binary was compiled
 * with; each bit corresponds to a CONFIG_ECORE_* build option.
 */
static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}
579
/* Input parameters for a LOAD_REQ exchange with the MFW */
struct ecore_load_req_in_params {
	u8 hsi_ver;		/* HSI version to advertise to the MFW */
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT  0	/* use DRV_ID_MCP_HSI_VER_CURRENT */
#define ECORE_LOAD_REQ_HSI_VER_1        1	/* legacy interface */
	u32 drv_ver_0;		/* driver version word */
	u32 drv_ver_1;		/* compiled-in feature bitmap */
	u32 fw_ver;		/* storm FW version the driver carries */
	u8 drv_role;		/* DRV_ROLE_* value */
	u8 timeout_val;		/* LOAD_REQ_LOCK_TO field value */
	u8 force_cmd;		/* LOAD_REQ_FORCE_* value */
	bool avoid_eng_reset;	/* sets the LOAD_REQ_FLAGS0 field */
};
592
/* Decoded contents of the MFW's LOAD_REQ response */
struct ecore_load_req_out_params {
	u32 load_code;		/* raw mcp_resp (FW_MSG_CODE_*) */
	u32 exist_drv_ver_0;	/* version word 0 of the existing driver */
	u32 exist_drv_ver_1;	/* version word 1 of the existing driver */
	u32 exist_fw_ver;	/* FW version of the existing driver */
	u8 exist_drv_role;	/* LOAD_RSP_ROLE field */
	u8 mfw_hsi_ver;		/* LOAD_RSP_HSI field */
	bool drv_exists;	/* LOAD_RSP_FLAGS0_DRV_EXISTS was set */
};
602
/* Send a LOAD_REQ mailbox command built from p_in_params and decode the
 * MFW response into p_out_params. The extended response payload is only
 * parsed when the new HSI is in use and the MFW did not refuse with an
 * HSI-1 mismatch (no payload exists in those cases).
 */
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                     struct ecore_load_req_in_params *p_in_params,
                     struct ecore_load_req_out_params *p_out_params)
{
	union drv_union_data union_data_src, union_data_dst;
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc *p_load_req;
	struct load_rsp_stc *p_load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	/* Build the LOAD_REQ payload from the input parameters */
	p_load_req = &union_data_src.load_req;
	OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
	p_load_req->drv_ver_0 = p_in_params->drv_ver_0;
	p_load_req->drv_ver_1 = p_in_params->drv_ver_1;
	p_load_req->fw_ver = p_in_params->fw_ver;
	ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_ROLE,
			    p_in_params->drv_role);
	ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_LOCK_TO,
			    p_in_params->timeout_val);
	ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FORCE,
			    p_in_params->force_cmd);
	ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FLAGS0,
			    p_in_params->avoid_eng_reset);

	/* DEFAULT advertises the current HSI; otherwise use the caller's */
	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &union_data_src;
	mb_params.p_data_dst = &union_data_dst;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	/* The load_req payload is only meaningful beyond HSI version 1 */
	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   p_load_req->drv_ver_0, p_load_req->drv_ver_1,
			   p_load_req->fw_ver, p_load_req->misc0,
			   ECORE_MFW_GET_FIELD(p_load_req->misc0,
					       LOAD_REQ_ROLE),
			   ECORE_MFW_GET_FIELD(p_load_req->misc0,
					       LOAD_REQ_LOCK_TO),
			   ECORE_MFW_GET_FIELD(p_load_req->misc0,
					       LOAD_REQ_FORCE),
			   ECORE_MFW_GET_FIELD(p_load_req->misc0,
					       LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	/* Decode the extended response payload when present */
	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		p_load_rsp = &union_data_dst.load_rsp;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   p_load_rsp->drv_ver_0, p_load_rsp->drv_ver_1,
			   p_load_rsp->fw_ver, p_load_rsp->misc0,
			   ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
					       LOAD_RSP_ROLE),
			   ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
					       LOAD_RSP_HSI),
			   ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
					       LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = p_load_rsp->drv_ver_0;
		p_out_params->exist_drv_ver_1 = p_load_rsp->drv_ver_1;
		p_out_params->exist_fw_ver = p_load_rsp->fw_ver;
		p_out_params->exist_drv_role =
			ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
					    LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}
701
702 static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
703                                                    enum ecore_drv_role drv_role,
704                                                    u8 *p_mfw_drv_role)
705 {
706         switch (drv_role) {
707         case ECORE_DRV_ROLE_OS:
708                 *p_mfw_drv_role = DRV_ROLE_OS;
709                 break;
710         case ECORE_DRV_ROLE_KDUMP:
711                 *p_mfw_drv_role = DRV_ROLE_KDUMP;
712                 break;
713         default:
714                 DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
715                 return ECORE_INVAL;
716         }
717
718         return ECORE_SUCCESS;
719 }
720
/* Force-load policy requested in a LOAD_REQ; translated into the MFW's
 * LOAD_REQ_FORCE_* encoding by ecore_get_mfw_force_cmd().
 */
enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};
726
727 static enum _ecore_status_t
728 ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
729                         enum ecore_load_req_force force_cmd,
730                         u8 *p_mfw_force_cmd)
731 {
732         switch (force_cmd) {
733         case ECORE_LOAD_REQ_FORCE_NONE:
734                 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
735                 break;
736         case ECORE_LOAD_REQ_FORCE_PF:
737                 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
738                 break;
739         case ECORE_LOAD_REQ_FORCE_ALL:
740                 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
741                 break;
742         default:
743                 DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
744                 return ECORE_INVAL;
745         }
746
747         return ECORE_SUCCESS;
748 }
749
/* Send a LOAD_REQ to the MFW and resolve its response into a load code.
 *
 * The negotiation may take several mailbox round-trips:
 * - If the MFW only speaks the legacy interface (HSI version 1), the
 *   request is re-sent with hsi_ver forced to 1.
 * - If the MFW answers that a force load is required, the request is
 *   re-sent with a force command - but only when the existing driver's
 *   role allows forcing; otherwise the load request is canceled and
 *   ECORE_BUSY is returned.
 *
 * On success, p_params->load_code holds the MFW response
 * (engine/port/function-level load).
 */
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	/* Emulation has no MFW; derive the load code locally */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	/* Build the request with the default (latest) HSI version and the
	 * driver/FW version identifiers.
	 */
	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	/* NOTE(review): "eocre" looks like a typo of "ecore"; the helper is
	 * presumably declared with this same spelling earlier in the file,
	 * so renaming must touch both sites - TODO confirm.
	 */
	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	/* The first attempt never forces out an existing driver */
	rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
				     &mfw_force_cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		/* Retry with the legacy HSI version */
		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role)) {
			DP_INFO(p_hwfn,
				"A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			/* Retry, this time forcing out any existing driver */
			rc = ecore_get_mfw_force_cmd(p_hwfn,
						     ECORE_LOAD_REQ_FORCE_ALL,
						     &mfw_force_cmd);
			if (rc != ECORE_SUCCESS)
				return rc;

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			/* Back out of the pending request before bailing */
			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
		DP_NOTICE(p_hwfn, false,
			  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		break;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}
885
886 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
887                                     struct ecore_ptt *p_ptt)
888 {
889         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
890                                         PUBLIC_PATH);
891         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
892         u32 path_addr = SECTION_ADDR(mfw_path_offsize,
893                                      ECORE_PATH_ID(p_hwfn));
894         u32 disabled_vfs[VF_MAX_STATIC / 32];
895         int i;
896
897         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
898                    "Reading Disabled VF information from [offset %08x],"
899                    " path_addr %08x\n",
900                    mfw_path_offsize, path_addr);
901
902         for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
903                 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
904                                            path_addr +
905                                            OFFSETOF(struct public_path,
906                                                     mcp_vf_disabled) +
907                                            sizeof(u32) * i);
908                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
909                            "FLR-ed VFs [%08x,...,%08x] - %08x\n",
910                            i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
911         }
912
913         if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
914                 OSAL_VF_FLR_UPDATE(p_hwfn);
915 }
916
917 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
918                                           struct ecore_ptt *p_ptt,
919                                           u32 *vfs_to_ack)
920 {
921         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
922                                         PUBLIC_FUNC);
923         u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
924         u32 func_addr = SECTION_ADDR(mfw_func_offsize,
925                                      MCP_PF_ID(p_hwfn));
926         struct ecore_mcp_mb_params mb_params;
927         union drv_union_data union_data;
928         enum _ecore_status_t rc;
929         int i;
930
931         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
932                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
933                            "Acking VFs [%08x,...,%08x] - %08x\n",
934                            i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
935
936         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
937         mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
938         OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
939         mb_params.p_data_src = &union_data;
940         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
941                                      &mb_params);
942         if (rc != ECORE_SUCCESS) {
943                 DP_NOTICE(p_hwfn, false,
944                           "Failed to pass ACK for VF flr to MFW\n");
945                 return ECORE_TIMEOUT;
946         }
947
948         /* TMP - clear the ACK bits; should be done by MFW */
949         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
950                 ecore_wr(p_hwfn, p_ptt,
951                          func_addr +
952                          OFFSETOF(struct public_func, drv_ack_vf_disabled) +
953                          i * sizeof(u32), 0);
954
955         return rc;
956 }
957
958 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
959                                                 struct ecore_ptt *p_ptt)
960 {
961         u32 transceiver_state;
962
963         transceiver_state = ecore_rd(p_hwfn, p_ptt,
964                                      p_hwfn->mcp_info->port_addr +
965                                      OFFSETOF(struct public_port,
966                                               transceiver_data));
967
968         DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
969                    "Received transceiver state update [0x%08x] from mfw"
970                    " [Addr 0x%x]\n",
971                    transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
972                                             OFFSETOF(struct public_port,
973                                                      transceiver_data)));
974
975         transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
976
977         if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
978                 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
979         else
980                 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
981 }
982
/* Process a link-change notification from the MFW.
 *
 * Reads the link status word from the port section of the MFW shmem,
 * decodes the negotiated speed/duplex, applies the min/max bandwidth
 * configuration, records the partner-advertised capabilities and flow
 * control, and finally notifies the OS layer via OSAL_LINK_UPDATE().
 * When b_reset is set, only the cached link output is cleared.
 */
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* The cached link output is rebuilt from scratch on every event */
	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					  OFFSETOF(struct public_port,
						   link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	/* Report link-up only after the driver has initialized the link */
	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	/* Decode the negotiated speed/duplex; unknown encodings leave the
	 * speed at 0.
	 */
	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* The negotiated line speed is kept separately, since p_link->speed
	 * is later adjusted according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
					      p_link->min_pf_rate);

	/* Auto-negotiation and PFC indications */
	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					 LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	/* Accumulate the link partner's advertised speed capabilities */
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	/* Flow control negotiated with the partner */
	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	/* Let the OS layer act on the new link state */
	OSAL_LINK_UPDATE(p_hwfn);
}
1119
1120 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1121                                         struct ecore_ptt *p_ptt, bool b_up)
1122 {
1123         struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1124         struct ecore_mcp_mb_params mb_params;
1125         union drv_union_data union_data;
1126         struct eth_phy_cfg *p_phy_cfg;
1127         enum _ecore_status_t rc = ECORE_SUCCESS;
1128         u32 cmd;
1129
1130 #ifndef ASIC_ONLY
1131         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1132                 return ECORE_SUCCESS;
1133 #endif
1134
1135         /* Set the shmem configuration according to params */
1136         p_phy_cfg = &union_data.drv_phy_cfg;
1137         OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
1138         cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1139         if (!params->speed.autoneg)
1140                 p_phy_cfg->speed = params->speed.forced_speed;
1141         p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1142         p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1143         p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1144         p_phy_cfg->adv_speed = params->speed.advertised_speeds;
1145         p_phy_cfg->loopback_mode = params->loopback_mode;
1146         p_hwfn->b_drv_link_init = b_up;
1147
1148         if (b_up)
1149                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1150                            "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
1151                            " adv_speed 0x%08x, loopback 0x%08x\n",
1152                            p_phy_cfg->speed, p_phy_cfg->pause,
1153                            p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode);
1154         else
1155                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1156
1157         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1158         mb_params.cmd = cmd;
1159         mb_params.p_data_src = &union_data;
1160         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1161
1162         /* if mcp fails to respond we must abort */
1163         if (rc != ECORE_SUCCESS) {
1164                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1165                 return rc;
1166         }
1167
1168         /* Reset the link status if needed */
1169         if (!b_up)
1170                 ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
1171
1172         return rc;
1173 }
1174
1175 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1176                                    struct ecore_ptt *p_ptt)
1177 {
1178         u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1179
1180         /* TODO - Add support for VFs */
1181         if (IS_VF(p_hwfn->p_dev))
1182                 return ECORE_INVAL;
1183
1184         path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1185                                                  PUBLIC_PATH);
1186         path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1187         path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1188
1189         proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1190                                  path_addr +
1191                                  OFFSETOF(struct public_path, process_kill)) &
1192             PROCESS_KILL_COUNTER_MASK;
1193
1194         return proc_kill_cnt;
1195 }
1196
1197 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1198                                           struct ecore_ptt *p_ptt)
1199 {
1200         struct ecore_dev *p_dev = p_hwfn->p_dev;
1201         u32 proc_kill_cnt;
1202
1203         /* Prevent possible attentions/interrupts during the recovery handling
1204          * and till its load phase, during which they will be re-enabled.
1205          */
1206         ecore_int_igu_disable_int(p_hwfn, p_ptt);
1207
1208         DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1209
1210         /* The following operations should be done once, and thus in CMT mode
1211          * are carried out by only the first HW function.
1212          */
1213         if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1214                 return;
1215
1216         if (p_dev->recov_in_prog) {
1217                 DP_NOTICE(p_hwfn, false,
1218                           "Ignoring the indication since a recovery"
1219                           " process is already in progress\n");
1220                 return;
1221         }
1222
1223         p_dev->recov_in_prog = true;
1224
1225         proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1226         DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1227
1228         OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1229 }
1230
1231 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1232                                           struct ecore_ptt *p_ptt,
1233                                           enum MFW_DRV_MSG_TYPE type)
1234 {
1235         enum ecore_mcp_protocol_type stats_type;
1236         union ecore_mcp_protocol_stats stats;
1237         struct ecore_mcp_mb_params mb_params;
1238         union drv_union_data union_data;
1239         u32 hsi_param;
1240
1241         switch (type) {
1242         case MFW_DRV_MSG_GET_LAN_STATS:
1243                 stats_type = ECORE_MCP_LAN_STATS;
1244                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1245                 break;
1246         default:
1247                 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
1248                 return;
1249         }
1250
1251         OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1252
1253         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1254         mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1255         mb_params.param = hsi_param;
1256         OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
1257         mb_params.p_data_src = &union_data;
1258         ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1259 }
1260
1261 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1262                                     struct public_func *p_shmem_info)
1263 {
1264         struct ecore_mcp_function_info *p_info;
1265
1266         p_info = &p_hwfn->mcp_info->func_info;
1267
1268         /* TODO - bandwidth min/max should have valid values of 1-100,
1269          * as well as some indication that the feature is disabled.
1270          * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
1271          * limit and correct value to min `1' and max `100' if limit isn't in
1272          * range.
1273          */
1274         p_info->bandwidth_min = (p_shmem_info->config &
1275                                  FUNC_MF_CFG_MIN_BW_MASK) >>
1276             FUNC_MF_CFG_MIN_BW_SHIFT;
1277         if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1278                 DP_INFO(p_hwfn,
1279                         "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1280                         p_info->bandwidth_min);
1281                 p_info->bandwidth_min = 1;
1282         }
1283
1284         p_info->bandwidth_max = (p_shmem_info->config &
1285                                  FUNC_MF_CFG_MAX_BW_MASK) >>
1286             FUNC_MF_CFG_MAX_BW_SHIFT;
1287         if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1288                 DP_INFO(p_hwfn,
1289                         "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1290                         p_info->bandwidth_max);
1291                 p_info->bandwidth_max = 100;
1292         }
1293 }
1294
1295 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1296                                     struct ecore_ptt *p_ptt,
1297                                     struct public_func *p_data,
1298                                     int pfid)
1299 {
1300         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1301                                         PUBLIC_FUNC);
1302         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1303         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1304         u32 i, size;
1305
1306         OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1307
1308         size = OSAL_MIN_T(u32, sizeof(*p_data),
1309                           SECTION_SIZE(mfw_path_offsize));
1310         for (i = 0; i < size / sizeof(u32); i++)
1311                 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1312                                               func_addr + (i << 2));
1313
1314         return size;
1315 }
1316
1317 static void
1318 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1319 {
1320         struct ecore_mcp_function_info *p_info;
1321         struct public_func shmem_info;
1322         u32 resp = 0, param = 0;
1323
1324         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1325
1326         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1327
1328         p_info = &p_hwfn->mcp_info->func_info;
1329
1330         ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1331
1332         ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1333
1334         /* Acknowledge the MFW */
1335         ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1336                       &param);
1337 }
1338
1339 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1340                                          struct ecore_ptt *p_ptt)
1341 {
1342         /* A single notification should be sent to upper driver in CMT mode */
1343         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1344                 return;
1345
1346         DP_NOTICE(p_hwfn, false,
1347                   "Fan failure was detected on the network interface card"
1348                   " and it's going to be shut down.\n");
1349
1350         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1351 }
1352
1353 static enum _ecore_status_t
1354 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1355                     u32 mdump_cmd, union drv_union_data *p_data_src,
1356                     union drv_union_data *p_data_dst, u32 *p_mcp_resp)
1357 {
1358         struct ecore_mcp_mb_params mb_params;
1359         enum _ecore_status_t rc;
1360
1361         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1362         mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1363         mb_params.param = mdump_cmd;
1364         mb_params.p_data_src = p_data_src;
1365         mb_params.p_data_dst = p_data_dst;
1366         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1367         if (rc != ECORE_SUCCESS)
1368                 return rc;
1369
1370         *p_mcp_resp = mb_params.mcp_resp;
1371         if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1372                 DP_NOTICE(p_hwfn, false,
1373                           "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
1374                           mdump_cmd);
1375                 rc = ECORE_INVAL;
1376         }
1377
1378         return rc;
1379 }
1380
1381 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1382                                                 struct ecore_ptt *p_ptt)
1383 {
1384         u32 mcp_resp;
1385
1386         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
1387                                    OSAL_NULL, OSAL_NULL, &mcp_resp);
1388 }
1389
1390 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1391                                                 struct ecore_ptt *p_ptt,
1392                                                 u32 epoch)
1393 {
1394         union drv_union_data union_data;
1395         u32 mcp_resp;
1396
1397         OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
1398
1399         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
1400                                    &union_data, OSAL_NULL, &mcp_resp);
1401 }
1402
1403 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1404                                              struct ecore_ptt *p_ptt)
1405 {
1406         u32 mcp_resp;
1407
1408         p_hwfn->p_dev->mdump_en = true;
1409
1410         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
1411                                    OSAL_NULL, OSAL_NULL, &mcp_resp);
1412 }
1413
1414 static enum _ecore_status_t
1415 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1416                            struct mdump_config_stc *p_mdump_config)
1417 {
1418         union drv_union_data union_data;
1419         u32 mcp_resp;
1420         enum _ecore_status_t rc;
1421
1422         rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
1423                                  OSAL_NULL, &union_data, &mcp_resp);
1424         if (rc != ECORE_SUCCESS)
1425                 return rc;
1426
1427         /* A zero response implies that the mdump command is not supported */
1428         if (!mcp_resp)
1429                 return ECORE_NOTIMPL;
1430
1431         if (mcp_resp != FW_MSG_CODE_OK) {
1432                 DP_NOTICE(p_hwfn, false,
1433                           "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1434                           mcp_resp);
1435                 rc = ECORE_UNKNOWN_ERROR;
1436         }
1437
1438         OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
1439                     sizeof(*p_mdump_config));
1440
1441         return rc;
1442 }
1443
1444 enum _ecore_status_t
1445 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1446                          struct ecore_mdump_info *p_mdump_info)
1447 {
1448         u32 addr, global_offsize, global_addr;
1449         struct mdump_config_stc mdump_config;
1450         enum _ecore_status_t rc;
1451
1452         OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1453
1454         addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1455                                     PUBLIC_GLOBAL);
1456         global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1457         global_addr = SECTION_ADDR(global_offsize, 0);
1458         p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1459                                         global_addr +
1460                                         OFFSETOF(struct public_global,
1461                                                  mdump_reason));
1462
1463         if (p_mdump_info->reason) {
1464                 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1465                 if (rc != ECORE_SUCCESS)
1466                         return rc;
1467
1468                 p_mdump_info->version = mdump_config.version;
1469                 p_mdump_info->config = mdump_config.config;
1470                 p_mdump_info->epoch = mdump_config.epoc;
1471                 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1472                 p_mdump_info->valid_logs = mdump_config.valid_logs;
1473
1474                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1475                            "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1476                            p_mdump_info->reason, p_mdump_info->version,
1477                            p_mdump_info->config, p_mdump_info->epoch,
1478                            p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1479         } else {
1480                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1481                            "MFW mdump info: reason %d\n", p_mdump_info->reason);
1482         }
1483
1484         return ECORE_SUCCESS;
1485 }
1486
1487 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1488                                                 struct ecore_ptt *p_ptt)
1489 {
1490         u32 mcp_resp;
1491
1492         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
1493                                    OSAL_NULL, OSAL_NULL, &mcp_resp);
1494 }
1495
1496 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1497                                             struct ecore_ptt *p_ptt)
1498 {
1499         /* In CMT mode - no need for more than a single acknowledgment to the
1500          * MFW, and no more than a single notification to the upper driver.
1501          */
1502         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1503                 return;
1504
1505         DP_NOTICE(p_hwfn, false,
1506                   "Received a critical error notification from the MFW!\n");
1507
1508         if (p_hwfn->p_dev->mdump_en) {
1509                 DP_NOTICE(p_hwfn, false,
1510                           "Not acknowledging the notification to allow the MFW crash dump\n");
1511                 p_hwfn->p_dev->mdump_en = false;
1512                 return;
1513         }
1514
1515         ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1516         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1517 }
1518
/* Process pending MFW -> driver async messages.
 *
 * Reads the MFW mailbox into mfw_mb_cur, dispatches a handler for every
 * message dword that changed relative to the local shadow copy, then
 * acknowledges all messages back to the MFW (which expects big-endian)
 * and updates the shadow.
 *
 * Returns ECORE_SUCCESS, or ECORE_INVAL if an unimplemented message was
 * received or no new message was found despite the indication.
 */
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones; the message index (i)
	 * identifies the event type.
	 */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything - the ack area follows the message area in the
	 * mailbox (one u32 sequence field + mfw_mb_length dwords of msgs).
	 */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
1612
1613 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1614                                            struct ecore_ptt *p_ptt,
1615                                            u32 *p_mfw_ver,
1616                                            u32 *p_running_bundle_id)
1617 {
1618         u32 global_offsize;
1619
1620 #ifndef ASIC_ONLY
1621         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1622                 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1623                 return ECORE_SUCCESS;
1624         }
1625 #endif
1626
1627         if (IS_VF(p_hwfn->p_dev)) {
1628                 if (p_hwfn->vf_iov_info) {
1629                         struct pfvf_acquire_resp_tlv *p_resp;
1630
1631                         p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1632                         *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1633                         return ECORE_SUCCESS;
1634                 } else {
1635                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1636                                    "VF requested MFW version prior to ACQUIRE\n");
1637                         return ECORE_INVAL;
1638                 }
1639         }
1640
1641         global_offsize = ecore_rd(p_hwfn, p_ptt,
1642                                   SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1643                                                        public_base,
1644                                                        PUBLIC_GLOBAL));
1645         *p_mfw_ver =
1646             ecore_rd(p_hwfn, p_ptt,
1647                      SECTION_ADDR(global_offsize,
1648                                   0) + OFFSETOF(struct public_global, mfw_ver));
1649
1650         if (p_running_bundle_id != OSAL_NULL) {
1651                 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1652                                                 SECTION_ADDR(global_offsize,
1653                                                              0) +
1654                                                 OFFSETOF(struct public_global,
1655                                                          running_bundle_id));
1656         }
1657
1658         return ECORE_SUCCESS;
1659 }
1660
1661 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1662                                               u32 *p_media_type)
1663 {
1664         struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1665         struct ecore_ptt *p_ptt;
1666
1667         /* TODO - Add support for VFs */
1668         if (IS_VF(p_dev))
1669                 return ECORE_INVAL;
1670
1671         if (!ecore_mcp_is_init(p_hwfn)) {
1672                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
1673                 return ECORE_BUSY;
1674         }
1675
1676         *p_media_type = MEDIA_UNSPECIFIED;
1677
1678         p_ptt = ecore_ptt_acquire(p_hwfn);
1679         if (!p_ptt)
1680                 return ECORE_BUSY;
1681
1682         *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1683                                  OFFSETOF(struct public_port, media_type));
1684
1685         ecore_ptt_release(p_hwfn, p_ptt);
1686
1687         return ECORE_SUCCESS;
1688 }
1689
1690 /* @DPDK */
1691 /* Old MFW has a global configuration for all PFs regarding RDMA support */
1692 static void
1693 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
1694                                  enum ecore_pci_personality *p_proto)
1695 {
1696         *p_proto = ECORE_PCI_ETH;
1697
1698         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1699                    "According to Legacy capabilities, L2 personality is %08x\n",
1700                    (u32)*p_proto);
1701 }
1702
1703 /* @DPDK */
1704 static enum _ecore_status_t
1705 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
1706                               struct ecore_ptt *p_ptt,
1707                               enum ecore_pci_personality *p_proto)
1708 {
1709         u32 resp = 0, param = 0;
1710         enum _ecore_status_t rc;
1711
1712         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1713                    "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
1714                    (u32)*p_proto, resp, param);
1715         return ECORE_SUCCESS;
1716 }
1717
1718 static enum _ecore_status_t
1719 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1720                           struct public_func *p_info,
1721                           struct ecore_ptt *p_ptt,
1722                           enum ecore_pci_personality *p_proto)
1723 {
1724         enum _ecore_status_t rc = ECORE_SUCCESS;
1725
1726         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1727         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1728                 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
1729                     ECORE_SUCCESS)
1730                         ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
1731                 break;
1732         default:
1733                 rc = ECORE_INVAL;
1734         }
1735
1736         return rc;
1737 }
1738
1739 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1740                                                     struct ecore_ptt *p_ptt)
1741 {
1742         struct ecore_mcp_function_info *info;
1743         struct public_func shmem_info;
1744
1745         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1746         info = &p_hwfn->mcp_info->func_info;
1747
1748         info->pause_on_host = (shmem_info.config &
1749                                FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1750
1751         if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1752                                       &info->protocol)) {
1753                 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1754                        (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1755                 return ECORE_INVAL;
1756         }
1757
1758         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1759
1760         if (shmem_info.mac_upper || shmem_info.mac_lower) {
1761                 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1762                 info->mac[1] = (u8)(shmem_info.mac_upper);
1763                 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1764                 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1765                 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1766                 info->mac[5] = (u8)(shmem_info.mac_lower);
1767         } else {
1768                 /* TODO - are there protocols for which there's no MAC? */
1769                 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1770         }
1771
1772         /* TODO - are these calculations true for BE machine? */
1773         info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
1774                          (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
1775         info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
1776                          (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
1777
1778         info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1779
1780         info->mtu = (u16)shmem_info.mtu_size;
1781
1782         if (info->mtu == 0)
1783                 info->mtu = 1500;
1784
1785         info->mtu = (u16)shmem_info.mtu_size;
1786
1787         DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1788                    "Read configuration from shmem: pause_on_host %02x"
1789                     " protocol %02x BW [%02x - %02x]"
1790                     " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
1791                     " node %lx ovlan %04x\n",
1792                    info->pause_on_host, info->protocol,
1793                    info->bandwidth_min, info->bandwidth_max,
1794                    info->mac[0], info->mac[1], info->mac[2],
1795                    info->mac[3], info->mac[4], info->mac[5],
1796                    (unsigned long)info->wwn_port,
1797                    (unsigned long)info->wwn_node, info->ovlan);
1798
1799         return ECORE_SUCCESS;
1800 }
1801
1802 struct ecore_mcp_link_params
1803 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1804 {
1805         if (!p_hwfn || !p_hwfn->mcp_info)
1806                 return OSAL_NULL;
1807         return &p_hwfn->mcp_info->link_input;
1808 }
1809
1810 struct ecore_mcp_link_state
1811 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1812 {
1813         if (!p_hwfn || !p_hwfn->mcp_info)
1814                 return OSAL_NULL;
1815
1816 #ifndef ASIC_ONLY
1817         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1818                 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1819                 p_hwfn->mcp_info->link_output.link_up = true;
1820         }
1821 #endif
1822
1823         return &p_hwfn->mcp_info->link_output;
1824 }
1825
1826 struct ecore_mcp_link_capabilities
1827 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1828 {
1829         if (!p_hwfn || !p_hwfn->mcp_info)
1830                 return OSAL_NULL;
1831         return &p_hwfn->mcp_info->link_capabilities;
1832 }
1833
1834 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1835                                      struct ecore_ptt *p_ptt)
1836 {
1837         u32 resp = 0, param = 0;
1838         enum _ecore_status_t rc;
1839
1840         rc = ecore_mcp_cmd(p_hwfn, p_ptt,
1841                            DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
1842
1843         /* Wait for the drain to complete before returning */
1844         OSAL_MSLEEP(1020);
1845
1846         return rc;
1847 }
1848
1849 const struct ecore_mcp_function_info
1850 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1851 {
1852         if (!p_hwfn || !p_hwfn->mcp_info)
1853                 return OSAL_NULL;
1854         return &p_hwfn->mcp_info->func_info;
1855 }
1856
1857 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1858                                            struct ecore_ptt *p_ptt,
1859                                            struct ecore_mcp_nvm_params *params)
1860 {
1861         enum _ecore_status_t rc;
1862
1863         switch (params->type) {
1864         case ECORE_MCP_NVM_RD:
1865                 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1866                                           params->nvm_common.offset,
1867                                           &params->nvm_common.resp,
1868                                           &params->nvm_common.param,
1869                                           params->nvm_rd.buf_size,
1870                                           params->nvm_rd.buf);
1871                 break;
1872         case ECORE_MCP_CMD:
1873                 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1874                                    params->nvm_common.offset,
1875                                    &params->nvm_common.resp,
1876                                    &params->nvm_common.param);
1877                 break;
1878         case ECORE_MCP_NVM_WR:
1879                 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1880                                           params->nvm_common.offset,
1881                                           &params->nvm_common.resp,
1882                                           &params->nvm_common.param,
1883                                           params->nvm_wr.buf_size,
1884                                           params->nvm_wr.buf);
1885                 break;
1886         default:
1887                 rc = ECORE_NOTIMPL;
1888                 break;
1889         }
1890         return rc;
1891 }
1892
1893 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1894                                   struct ecore_ptt *p_ptt, u32 personalities)
1895 {
1896         enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1897         struct public_func shmem_info;
1898         int i, count = 0, num_pfs;
1899
1900         num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1901
1902         for (i = 0; i < num_pfs; i++) {
1903                 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1904                                          MCP_PF_ID_BY_REL(p_hwfn, i));
1905                 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1906                         continue;
1907
1908                 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1909                                               &protocol) !=
1910                     ECORE_SUCCESS)
1911                         continue;
1912
1913                 if ((1 << ((u32)protocol)) & personalities)
1914                         count++;
1915         }
1916
1917         return count;
1918 }
1919
1920 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1921                                               struct ecore_ptt *p_ptt,
1922                                               u32 *p_flash_size)
1923 {
1924         u32 flash_size;
1925
1926 #ifndef ASIC_ONLY
1927         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1928                 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
1929                 return ECORE_INVAL;
1930         }
1931 #endif
1932
1933         if (IS_VF(p_hwfn->p_dev))
1934                 return ECORE_INVAL;
1935
1936         flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1937         flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1938             MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1939         flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1940
1941         *p_flash_size = flash_size;
1942
1943         return ECORE_SUCCESS;
1944 }
1945
1946 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1947                                                   struct ecore_ptt *p_ptt)
1948 {
1949         struct ecore_dev *p_dev = p_hwfn->p_dev;
1950
1951         if (p_dev->recov_in_prog) {
1952                 DP_NOTICE(p_hwfn, false,
1953                           "Avoid triggering a recovery since such a process"
1954                           " is already in progress\n");
1955                 return ECORE_AGAIN;
1956         }
1957
1958         DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
1959         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1960
1961         return ECORE_SUCCESS;
1962 }
1963
1964 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1965                                               struct ecore_ptt *p_ptt,
1966                                               u8 vf_id, u8 num)
1967 {
1968         u32 resp = 0, param = 0, rc_param = 0;
1969         enum _ecore_status_t rc;
1970
1971 /* Only Leader can configure MSIX, and need to take CMT into account */
1972
1973         if (!IS_LEAD_HWFN(p_hwfn))
1974                 return ECORE_SUCCESS;
1975         num *= p_hwfn->p_dev->num_hwfns;
1976
1977         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1978             DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1979         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1980             DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1981
1982         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1983                            &resp, &rc_param);
1984
1985         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1986                 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
1987                           vf_id);
1988                 rc = ECORE_INVAL;
1989         } else {
1990                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1991                            "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
1992                             num, vf_id);
1993         }
1994
1995         return rc;
1996 }
1997
/* Report the driver version to the MFW via the SET_VERSION mailbox command.
 *
 * The driver-name string is copied into the mailbox union data with each
 * dword converted to big-endian, as the MFW expects.
 *
 * Returns the mailbox status; logs an error on failure.
 */
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 num_words, i;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	/* Emulation/FPGA - no MFW to notify */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		/* The driver name is expected to be in a big-endian format */
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
2034
2035 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2036                                     struct ecore_ptt *p_ptt)
2037 {
2038         enum _ecore_status_t rc;
2039         u32 resp = 0, param = 0;
2040
2041         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2042                            &param);
2043         if (rc != ECORE_SUCCESS)
2044                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2045
2046         return rc;
2047 }
2048
2049 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2050                                       struct ecore_ptt *p_ptt)
2051 {
2052         u32 value, cpu_mode;
2053
2054         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2055
2056         value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2057         value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2058         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2059         cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2060
2061         return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
2062 }
2063
2064 enum _ecore_status_t
2065 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2066                                    struct ecore_ptt *p_ptt,
2067                                    enum ecore_ov_client client)
2068 {
2069         enum _ecore_status_t rc;
2070         u32 resp = 0, param = 0;
2071         u32 drv_mb_param;
2072
2073         switch (client) {
2074         case ECORE_OV_CLIENT_DRV:
2075                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2076                 break;
2077         case ECORE_OV_CLIENT_USER:
2078                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2079                 break;
2080         case ECORE_OV_CLIENT_VENDOR_SPEC:
2081                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2082                 break;
2083         default:
2084                 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2085                 return ECORE_INVAL;
2086         }
2087
2088         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2089                            drv_mb_param, &resp, &param);
2090         if (rc != ECORE_SUCCESS)
2091                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2092
2093         return rc;
2094 }
2095
2096 enum _ecore_status_t
2097 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2098                                  struct ecore_ptt *p_ptt,
2099                                  enum ecore_ov_driver_state drv_state)
2100 {
2101         enum _ecore_status_t rc;
2102         u32 resp = 0, param = 0;
2103         u32 drv_mb_param;
2104
2105         switch (drv_state) {
2106         case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2107                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2108                 break;
2109         case ECORE_OV_DRIVER_STATE_DISABLED:
2110                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2111                 break;
2112         case ECORE_OV_DRIVER_STATE_ACTIVE:
2113                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2114                 break;
2115         default:
2116                 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2117                 return ECORE_INVAL;
2118         }
2119
2120         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2121                            drv_mb_param, &resp, &param);
2122         if (rc != ECORE_SUCCESS)
2123                 DP_ERR(p_hwfn, "Failed to send driver state\n");
2124
2125         return rc;
2126 }
2127
/* FC NPIV table retrieval - not implemented; always reports success
 * without touching p_table.
 * NOTE(review): presumably a deliberate @DPDK stub - confirm callers do
 * not rely on p_table being filled.
 */
enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return 0;
}
2134
/* MTU update notification to the MFW - not implemented; always reports
 * success without issuing a mailbox command.
 * NOTE(review): presumably a deliberate @DPDK stub - confirm.
 */
enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return 0;
}
2141
2142 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2143                                        struct ecore_ptt *p_ptt,
2144                                        enum ecore_led_mode mode)
2145 {
2146         u32 resp = 0, param = 0, drv_mb_param;
2147         enum _ecore_status_t rc;
2148
2149         switch (mode) {
2150         case ECORE_LED_MODE_ON:
2151                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2152                 break;
2153         case ECORE_LED_MODE_OFF:
2154                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2155                 break;
2156         case ECORE_LED_MODE_RESTORE:
2157                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2158                 break;
2159         default:
2160                 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2161                 return ECORE_INVAL;
2162         }
2163
2164         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2165                            drv_mb_param, &resp, &param);
2166         if (rc != ECORE_SUCCESS)
2167                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2168
2169         return rc;
2170 }
2171
2172 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2173                                              struct ecore_ptt *p_ptt,
2174                                              u32 mask_parities)
2175 {
2176         enum _ecore_status_t rc;
2177         u32 resp = 0, param = 0;
2178
2179         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2180                            mask_parities, &resp, &param);
2181
2182         if (rc != ECORE_SUCCESS) {
2183                 DP_ERR(p_hwfn,
2184                        "MCP response failure for mask parities, aborting\n");
2185         } else if (resp != FW_MSG_CODE_OK) {
2186                 DP_ERR(p_hwfn,
2187                        "MCP did not ack mask parity request. Old MFW?\n");
2188                 rc = ECORE_INVAL;
2189         }
2190
2191         return rc;
2192 }
2193
/* Read 'len' bytes from NVRAM address 'addr' into p_buf.
 *
 * The NVM is read through the MFW mailbox in chunks of at most
 * MCP_DRV_NVM_BUF_LEN bytes; the chunk length is packed into the upper
 * bits of the mailbox offset parameter. The last MCP response is cached
 * in p_dev->mcp_nvm_resp for retrieval via ecore_mcp_nvm_resp().
 *
 * Returns ECORE_SUCCESS, ECORE_BUSY if no PTT is available, or the
 * mailbox failure code of the first failed chunk.
 */
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	bytes_left = len;
	offset = 0;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		/* Address and chunk length share the mailbox parameter */
		params.nvm_common.offset = (addr + offset) |
		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
					    FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		/* Advance by the number of bytes the MFW actually returned */
		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
2242
2243 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2244                                         u32 addr, u8 *p_buf, u32 len)
2245 {
2246         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2247         struct ecore_mcp_nvm_params params;
2248         struct ecore_ptt *p_ptt;
2249         enum _ecore_status_t rc;
2250
2251         p_ptt = ecore_ptt_acquire(p_hwfn);
2252         if (!p_ptt)
2253                 return ECORE_BUSY;
2254
2255         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2256         params.type = ECORE_MCP_NVM_RD;
2257         params.nvm_rd.buf_size = &len;
2258         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2259             DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
2260         params.nvm_common.offset = addr;
2261         params.nvm_rd.buf = (u32 *)p_buf;
2262         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2263         if (rc != ECORE_SUCCESS)
2264                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2265
2266         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2267         ecore_ptt_release(p_hwfn, p_ptt);
2268
2269         return rc;
2270 }
2271
2272 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2273 {
2274         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2275         struct ecore_mcp_nvm_params params;
2276         struct ecore_ptt *p_ptt;
2277
2278         p_ptt = ecore_ptt_acquire(p_hwfn);
2279         if (!p_ptt)
2280                 return ECORE_BUSY;
2281
2282         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2283         OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2284         ecore_ptt_release(p_hwfn, p_ptt);
2285
2286         return ECORE_SUCCESS;
2287 }
2288
2289 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2290 {
2291         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2292         struct ecore_mcp_nvm_params params;
2293         struct ecore_ptt *p_ptt;
2294         enum _ecore_status_t rc;
2295
2296         p_ptt = ecore_ptt_acquire(p_hwfn);
2297         if (!p_ptt)
2298                 return ECORE_BUSY;
2299         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2300         params.type = ECORE_MCP_CMD;
2301         params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
2302         params.nvm_common.offset = addr;
2303         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2304         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2305         ecore_ptt_release(p_hwfn, p_ptt);
2306
2307         return rc;
2308 }
2309
2310 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2311                                                   u32 addr)
2312 {
2313         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2314         struct ecore_mcp_nvm_params params;
2315         struct ecore_ptt *p_ptt;
2316         enum _ecore_status_t rc;
2317
2318         p_ptt = ecore_ptt_acquire(p_hwfn);
2319         if (!p_ptt)
2320                 return ECORE_BUSY;
2321         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2322         params.type = ECORE_MCP_CMD;
2323         params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2324         params.nvm_common.offset = addr;
2325         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2326         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2327         ecore_ptt_release(p_hwfn, p_ptt);
2328
2329         return rc;
2330 }
2331
2332 /* rc receives ECORE_INVAL as default parameter because
2333  * it might not enter the while loop if the len is 0
2334  */
2335 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2336                                          u32 addr, u8 *p_buf, u32 len)
2337 {
2338         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2339         enum _ecore_status_t rc = ECORE_INVAL;
2340         struct ecore_mcp_nvm_params params;
2341         struct ecore_ptt *p_ptt;
2342         u32 buf_idx, buf_size;
2343
2344         p_ptt = ecore_ptt_acquire(p_hwfn);
2345         if (!p_ptt)
2346                 return ECORE_BUSY;
2347
2348         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2349         params.type = ECORE_MCP_NVM_WR;
2350         if (cmd == ECORE_PUT_FILE_DATA)
2351                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2352         else
2353                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2354         buf_idx = 0;
2355         while (buf_idx < len) {
2356                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2357                                       MCP_DRV_NVM_BUF_LEN);
2358                 params.nvm_common.offset = ((buf_size <<
2359                                              DRV_MB_PARAM_NVM_LEN_SHIFT)
2360                                             | addr) + buf_idx;
2361                 params.nvm_wr.buf_size = buf_size;
2362                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2363                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2364                 if (rc != ECORE_SUCCESS ||
2365                     ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
2366                      (params.nvm_common.resp !=
2367                       FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
2368                         DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2369
2370                 /* This can be a lengthy process, and it's possible scheduler
2371                  * isn't preemptible. Sleep a bit to prevent CPU hogging.
2372                  */
2373                 if (buf_idx % 0x1000 >
2374                     (buf_idx + buf_size) % 0x1000)
2375                         OSAL_MSLEEP(1);
2376
2377                 buf_idx += buf_size;
2378         }
2379
2380         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2381         ecore_ptt_release(p_hwfn, p_ptt);
2382
2383         return rc;
2384 }
2385
2386 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2387                                          u32 addr, u8 *p_buf, u32 len)
2388 {
2389         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2390         struct ecore_mcp_nvm_params params;
2391         struct ecore_ptt *p_ptt;
2392         enum _ecore_status_t rc;
2393
2394         p_ptt = ecore_ptt_acquire(p_hwfn);
2395         if (!p_ptt)
2396                 return ECORE_BUSY;
2397
2398         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2399         params.type = ECORE_MCP_NVM_WR;
2400         params.nvm_wr.buf_size = len;
2401         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
2402             DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
2403         params.nvm_common.offset = addr;
2404         params.nvm_wr.buf = (u32 *)p_buf;
2405         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2406         if (rc != ECORE_SUCCESS)
2407                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2408         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2409         ecore_ptt_release(p_hwfn, p_ptt);
2410
2411         return rc;
2412 }
2413
2414 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2415                                                    u32 addr)
2416 {
2417         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2418         struct ecore_mcp_nvm_params params;
2419         struct ecore_ptt *p_ptt;
2420         enum _ecore_status_t rc;
2421
2422         p_ptt = ecore_ptt_acquire(p_hwfn);
2423         if (!p_ptt)
2424                 return ECORE_BUSY;
2425
2426         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2427         params.type = ECORE_MCP_CMD;
2428         params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
2429         params.nvm_common.offset = addr;
2430         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2431         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2432         ecore_ptt_release(p_hwfn, p_ptt);
2433
2434         return rc;
2435 }
2436
2437 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2438                                             struct ecore_ptt *p_ptt,
2439                                             u32 port, u32 addr, u32 offset,
2440                                             u32 len, u8 *p_buf)
2441 {
2442         struct ecore_mcp_nvm_params params;
2443         enum _ecore_status_t rc;
2444         u32 bytes_left, bytes_to_copy, buf_size;
2445
2446         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2447         params.nvm_common.offset =
2448                 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2449                 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2450         addr = offset;
2451         offset = 0;
2452         bytes_left = len;
2453         params.type = ECORE_MCP_NVM_RD;
2454         params.nvm_rd.buf_size = &buf_size;
2455         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
2456         while (bytes_left > 0) {
2457                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2458                                            MAX_I2C_TRANSACTION_SIZE);
2459                 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2460                 params.nvm_common.offset &=
2461                         (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2462                          DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2463                 params.nvm_common.offset |=
2464                         ((addr + offset) <<
2465                          DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2466                 params.nvm_common.offset |=
2467                         (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2468                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2469                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2470                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2471                         return ECORE_NODEV;
2472                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2473                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2474                         return ECORE_UNKNOWN_ERROR;
2475
2476                 offset += *params.nvm_rd.buf_size;
2477                 bytes_left -= *params.nvm_rd.buf_size;
2478         }
2479
2480         return ECORE_SUCCESS;
2481 }
2482
2483 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2484                                              struct ecore_ptt *p_ptt,
2485                                              u32 port, u32 addr, u32 offset,
2486                                              u32 len, u8 *p_buf)
2487 {
2488         struct ecore_mcp_nvm_params params;
2489         enum _ecore_status_t rc;
2490         u32 buf_idx, buf_size;
2491
2492         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2493         params.nvm_common.offset =
2494                 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2495                 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2496         params.type = ECORE_MCP_NVM_WR;
2497         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
2498         buf_idx = 0;
2499         while (buf_idx < len) {
2500                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2501                                       MAX_I2C_TRANSACTION_SIZE);
2502                 params.nvm_common.offset &=
2503                         (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2504                          DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2505                 params.nvm_common.offset |=
2506                         ((offset + buf_idx) <<
2507                          DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2508                 params.nvm_common.offset |=
2509                         (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2510                 params.nvm_wr.buf_size = buf_size;
2511                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2512                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2513                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2514                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2515                         return ECORE_NODEV;
2516                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2517                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2518                         return ECORE_UNKNOWN_ERROR;
2519
2520                 buf_idx += buf_size;
2521         }
2522
2523         return ECORE_SUCCESS;
2524 }
2525
2526 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2527                                          struct ecore_ptt *p_ptt,
2528                                          u16 gpio, u32 *gpio_val)
2529 {
2530         enum _ecore_status_t rc = ECORE_SUCCESS;
2531         u32 drv_mb_param = 0, rsp;
2532
2533         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
2534
2535         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
2536                            drv_mb_param, &rsp, gpio_val);
2537
2538         if (rc != ECORE_SUCCESS)
2539                 return rc;
2540
2541         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2542                 return ECORE_UNKNOWN_ERROR;
2543
2544         return ECORE_SUCCESS;
2545 }
2546
2547 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2548                                           struct ecore_ptt *p_ptt,
2549                                           u16 gpio, u16 gpio_val)
2550 {
2551         enum _ecore_status_t rc = ECORE_SUCCESS;
2552         u32 drv_mb_param = 0, param, rsp;
2553
2554         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
2555                 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
2556
2557         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2558                            drv_mb_param, &rsp, &param);
2559
2560         if (rc != ECORE_SUCCESS)
2561                 return rc;
2562
2563         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2564                 return ECORE_UNKNOWN_ERROR;
2565
2566         return ECORE_SUCCESS;
2567 }
2568
2569 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2570                                          struct ecore_ptt *p_ptt,
2571                                          u16 gpio, u32 *gpio_direction,
2572                                          u32 *gpio_ctrl)
2573 {
2574         u32 drv_mb_param = 0, rsp, val = 0;
2575         enum _ecore_status_t rc = ECORE_SUCCESS;
2576
2577         drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
2578
2579         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2580                            drv_mb_param, &rsp, &val);
2581         if (rc != ECORE_SUCCESS)
2582                 return rc;
2583
2584         *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2585                            DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
2586         *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2587                       DRV_MB_PARAM_GPIO_CTRL_SHIFT;
2588
2589         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2590                 return ECORE_UNKNOWN_ERROR;
2591
2592         return ECORE_SUCCESS;
2593 }
2594
2595 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2596                                                   struct ecore_ptt *p_ptt)
2597 {
2598         u32 drv_mb_param = 0, rsp, param;
2599         enum _ecore_status_t rc = ECORE_SUCCESS;
2600
2601         drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2602                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2603
2604         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2605                            drv_mb_param, &rsp, &param);
2606
2607         if (rc != ECORE_SUCCESS)
2608                 return rc;
2609
2610         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2611             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2612                 rc = ECORE_UNKNOWN_ERROR;
2613
2614         return rc;
2615 }
2616
2617 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2618                                                struct ecore_ptt *p_ptt)
2619 {
2620         u32 drv_mb_param, rsp, param;
2621         enum _ecore_status_t rc = ECORE_SUCCESS;
2622
2623         drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
2624                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2625
2626         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2627                            drv_mb_param, &rsp, &param);
2628
2629         if (rc != ECORE_SUCCESS)
2630                 return rc;
2631
2632         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2633             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2634                 rc = ECORE_UNKNOWN_ERROR;
2635
2636         return rc;
2637 }
2638
2639 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
2640         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
2641 {
2642         u32 drv_mb_param = 0, rsp;
2643         enum _ecore_status_t rc = ECORE_SUCCESS;
2644
2645         drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
2646                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2647
2648         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2649                            drv_mb_param, &rsp, num_images);
2650
2651         if (rc != ECORE_SUCCESS)
2652                 return rc;
2653
2654         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
2655                 rc = ECORE_UNKNOWN_ERROR;
2656
2657         return rc;
2658 }
2659
2660 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
2661         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2662         struct bist_nvm_image_att *p_image_att, u32 image_index)
2663 {
2664         struct ecore_mcp_nvm_params params;
2665         enum _ecore_status_t rc;
2666         u32 buf_size;
2667
2668         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2669         params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
2670                                     DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2671         params.nvm_common.offset |= (image_index <<
2672                                     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
2673
2674         params.type = ECORE_MCP_NVM_RD;
2675         params.nvm_rd.buf_size = &buf_size;
2676         params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
2677         params.nvm_rd.buf = (u32 *)p_image_att;
2678
2679         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2680         if (rc != ECORE_SUCCESS)
2681                 return rc;
2682
2683         if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2684             (p_image_att->return_code != 1))
2685                 rc = ECORE_UNKNOWN_ERROR;
2686
2687         return rc;
2688 }
2689
/* Query the MFW for the board's temperature sensors and unpack them into
 * the caller's ecore_temperature_info.
 *
 * @param p_temp_info - out: number of sensors and per-sensor fields
 *			(location, high/critical thresholds, current temp)
 *
 * @return mailbox rc; ECORE_SUCCESS on success.
 */
enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc *p_mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	/* The sensor data arrives in the union payload of the mailbox */
	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mfw_temp_info = &union_data.temp_info;

	/* Clamp to the driver's sensor array size */
	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32,
					      p_mfw_temp_info->num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	/* Each sensor is reported as one packed 32-bit word */
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = p_mfw_temp_info->sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_SHIFT;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_SHIFT;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_SHIFT;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_SHIFT;
	}

	return ECORE_SUCCESS;
}
2731
2732 enum _ecore_status_t ecore_mcp_get_mba_versions(
2733         struct ecore_hwfn *p_hwfn,
2734         struct ecore_ptt *p_ptt,
2735         struct ecore_mba_vers *p_mba_vers)
2736 {
2737         struct ecore_mcp_nvm_params params;
2738         enum _ecore_status_t rc;
2739         u32 buf_size;
2740
2741         OSAL_MEM_ZERO(&params, sizeof(params));
2742         params.type = ECORE_MCP_NVM_RD;
2743         params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
2744         params.nvm_common.offset = 0;
2745         params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
2746         params.nvm_rd.buf_size = &buf_size;
2747         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2748
2749         if (rc != ECORE_SUCCESS)
2750                 return rc;
2751
2752         if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2753             FW_MSG_CODE_NVM_OK)
2754                 rc = ECORE_UNKNOWN_ERROR;
2755
2756         if (buf_size != MCP_DRV_NVM_BUF_LEN)
2757                 rc = ECORE_UNKNOWN_ERROR;
2758
2759         return rc;
2760 }
2761
2762 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
2763                                               struct ecore_ptt *p_ptt,
2764                                               u64 *num_events)
2765 {
2766         u32 rsp;
2767
2768         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
2769                              0, &rsp, (u32 *)num_events);
2770 }
2771
/* Version of the driver<->MFW resource-allocation interface, packed into
 * the mailbox param of DRV_MSG_GET_RESOURCE_ALLOC_MSG.
 */
#define ECORE_RESC_ALLOC_VERSION_MAJOR  1
#define ECORE_RESC_ALLOC_VERSION_MINOR  0
#define ECORE_RESC_ALLOC_VERSION                                \
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<                     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |    \
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<                     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
2779
/* Exchange resource-allocation info with the MFW.
 *
 * @param p_resc_info - in: the resource to query (res_id); out: the MFW's
 *			allocation (size/offset, VF size/offset, flags)
 * @param p_mcp_resp - out: raw firmware response code
 * @param p_mcp_param - out: raw firmware param (interface version)
 *
 * @return mailbox rc; ECORE_SUCCESS on success.
 */
enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     struct resource_info *p_resc_info,
					     u32 *p_mcp_resp, u32 *p_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	/* The request and the reply share the same union payload */
	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	OSAL_MEMCPY(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
	mb_params.p_data_src = &union_data;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = mb_params.mcp_resp;
	*p_mcp_param = mb_params.mcp_param;

	/* Copy the MFW's answer back to the caller's structure */
	OSAL_MEMCPY(p_resc_info, &union_data.resource, sizeof(*p_resc_info));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x,"
		   " offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
		   *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
		   p_resc_info->offset, p_resc_info->vf_size,
		   p_resc_info->vf_offset, p_resc_info->flags);

	return ECORE_SUCCESS;
}
2813
2814 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
2815                                                struct ecore_ptt *p_ptt)
2816 {
2817         u32 mcp_resp, mcp_param;
2818
2819         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
2820                              &mcp_resp, &mcp_param);
2821 }
2822
2823 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
2824                                                    struct ecore_ptt *p_ptt,
2825                                                    u32 param, u32 *p_mcp_resp,
2826                                                    u32 *p_mcp_param)
2827 {
2828         enum _ecore_status_t rc;
2829
2830         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
2831                            p_mcp_resp, p_mcp_param);
2832         if (rc != ECORE_SUCCESS)
2833                 return rc;
2834
2835         /* A zero response implies that the resource command is not supported */
2836         if (!*p_mcp_resp)
2837                 return ECORE_NOTIMPL;
2838
2839         if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
2840                 u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
2841
2842                 DP_NOTICE(p_hwfn, false,
2843                           "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
2844                           param, opcode);
2845                 return ECORE_INVAL;
2846         }
2847
2848         return rc;
2849 }
2850
/* Try to acquire an MFW-arbitrated resource lock.
 *
 * @param resource_num - resource to lock
 * @param timeout - aging timeout, or ECORE_MCP_RESC_LOCK_TO_DEFAULT /
 *		    ECORE_MCP_RESC_LOCK_TO_NONE for the special request
 *		    opcodes
 * @param p_granted - out: true if the lock was granted, false if busy
 * @param p_owner - out: current owner of the resource
 *
 * @return ECORE_INVAL on an unexpected response opcode, otherwise the
 *	   resource-command rc.
 */
enum _ecore_status_t ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 resource_num, u8 timeout,
					 bool *p_granted, u8 *p_owner)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	/* The special timeout values select dedicated request opcodes and
	 * are not sent as an age value.
	 */
	switch (timeout) {
	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		timeout = 0;
		break;
	case ECORE_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, resource_num);
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, timeout);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resc_num %d]\n",
		   param, timeout, opcode, resource_num);

	/* Attempt to acquire the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	*p_owner = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, *p_owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		*p_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		*p_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
2912
/* Release (or force-release) an MFW-arbitrated resource lock.
 *
 * @param resource_num - resource to release
 * @param force - use the force-release opcode, which releases the lock
 *		  even if another owner holds it
 * @param p_released - out: true if the resource was released (or already
 *		       was), false if this function is not the owner
 *
 * @return ECORE_INVAL on an unexpected response opcode, otherwise the
 *	   resource-command rc.
 */
enum _ecore_status_t ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   u8 resource_num, bool force,
					   bool *p_released)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	opcode = force ? RESOURCE_OPCODE_FORCE_RELEASE
		       : RESOURCE_OPCODE_RELEASE;
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, resource_num);
	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resc_num %d]\n",
		   param, opcode, resource_num);

	/* Attempt to release the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [resc_num %d]\n",
			resource_num);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		*p_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		*p_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}