net/qede/base: change signature of MCP command
[dpdk.git] / drivers / net / qede / base / ecore_mcp.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_iov_api.h"
19 #include "ecore_gtt_reg_addr.h"
20 #include "ecore_iro.h"
21 #include "ecore_dcbx.h"
22
23 #define CHIP_MCP_RESP_ITER_US 10
24 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
25
26 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)   /* Account for 5 sec */
27 #define ECORE_MCP_RESET_RETRIES (50 * 1000)     /* Account for 500 msec */
28
29 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
30         ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
31                  _val)
32
33 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
34         ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
35
36 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
37         DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
38                      OFFSETOF(struct public_drv_mb, _field), _val)
39
40 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
41         DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
42                      OFFSETOF(struct public_drv_mb, _field))
43
44 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
45         DRV_ID_PDA_COMP_VER_SHIFT)
46
47 #define MCP_BYTES_PER_MBIT_SHIFT 17
48
49 #ifndef ASIC_ONLY
50 static int loaded;
51 static int loaded_port[MAX_NUM_PORTS] = { 0 };
52 #endif
53
54 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
55 {
56         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
57                 return false;
58         return true;
59 }
60
61 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
62 {
63         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
64                                         PUBLIC_PORT);
65         u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
66
67         p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
68                                                    MFW_PORT(p_hwfn));
69         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
70                    "port_addr = 0x%x, port_id 0x%02x\n",
71                    p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
72 }
73
74 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
75 {
76         u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
77         OSAL_BE32 tmp;
78         u32 i;
79
80 #ifndef ASIC_ONLY
81         if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
82                 return;
83 #endif
84
85         if (!p_hwfn->mcp_info->public_base)
86                 return;
87
88         for (i = 0; i < length; i++) {
89                 tmp = ecore_rd(p_hwfn, p_ptt,
90                                p_hwfn->mcp_info->mfw_mb_addr +
91                                (i << 2) + sizeof(u32));
92
93                 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
94                     OSAL_BE32_TO_CPU(tmp);
95         }
96 }
97
98 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
99 {
100         if (p_hwfn->mcp_info) {
101                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
102                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
103                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
104         }
105         OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
106         p_hwfn->mcp_info = OSAL_NULL;
107
108         return ECORE_SUCCESS;
109 }
110
/* Discover the MFW shared-memory layout and cache the driver/MFW mailbox
 * addresses and initial sequence numbers in p_hwfn->mcp_info.
 *
 * Returns ECORE_INVAL when no MFW is present (emulation, or the shared
 * memory address register reads as zero), ECORE_SUCCESS otherwise.
 */
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		/* public_base == 0 is used elsewhere as "MCP not init" */
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	/* A zero shared-memory address means the MFW never initialized it */
	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address; its first dword holds the MB length */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					       p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
	    DRV_PULSE_SEQ_MASK;

	/* Sample the MCP "history" counter; a later change in this register
	 * indicates the MCP was reset and the offsets must be re-read
	 * (see ecore_do_mcp_cmd).
	 */
	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					  MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
165
166 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
167                                         struct ecore_ptt *p_ptt)
168 {
169         struct ecore_mcp_info *p_info;
170         u32 size;
171
172         /* Allocate mcp_info structure */
173         p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
174                                        sizeof(*p_hwfn->mcp_info));
175         if (!p_hwfn->mcp_info)
176                 goto err;
177         p_info = p_hwfn->mcp_info;
178
179         if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
180                 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
181                 /* Do not free mcp_info here, since public_base indicate that
182                  * the MCP is not initialized
183                  */
184                 return ECORE_SUCCESS;
185         }
186
187         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
188         p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189         p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
190         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
191                 goto err;
192
193         /* Initialize the MFW spinlock */
194         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
195         OSAL_SPIN_LOCK_INIT(&p_info->lock);
196
197         return ECORE_SUCCESS;
198
199 err:
200         DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
201         ecore_mcp_free(p_hwfn);
202         return ECORE_NOMEM;
203 }
204
/* Request an MCP reset via the mailbox and poll the MCP "history" counter
 * (MISCS_REG_GENERIC_POR_0) until it changes, indicating the reset took
 * effect.
 *
 * Returns ECORE_SUCCESS on a confirmed reset, ECORE_AGAIN on timeout.
 */
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	/* Emulated MCP responds far slower than real silicon */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the MCP up to 500 msec (50*1000 iterations of 10usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	/* A changed POR counter proves the MCP went through reset */
	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);

	return rc;
}
245
/* Should be called while the dedicated spinlock is acquired.
 *
 * Low-level mailbox transaction: writes the command/param to the driver
 * mailbox with a fresh sequence number and polls the FW mailbox header
 * until the FW echoes that sequence number (or the retry budget runs out).
 *
 * @param o_mcp_resp   Out: FW response code (FW_MSG_CODE_MASK bits);
 *                     zeroed when the MFW fails to respond.
 * @param o_mcp_param  Out: FW response parameter; only valid on success.
 *
 * Returns ECORE_SUCCESS on a matching response, ECORE_AGAIN on timeout.
 */
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt,
                                             u32 cmd, u32 param,
                                             u32 *o_mcp_resp,
                                             u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	/* Emulated MCP responds far slower than real silicon */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
	    DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 second (500*1000 iterations of 10usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < ECORE_DRV_MB_MAX_RETRIES));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "[after %d ms] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}
	return rc;
}
315
316
317 static enum _ecore_status_t
318 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
319                         struct ecore_mcp_mb_params *p_mb_params)
320 {
321         u32 union_data_addr;
322         enum _ecore_status_t rc;
323
324         /* MCP not initialized */
325         if (!ecore_mcp_is_init(p_hwfn)) {
326                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
327                 return ECORE_BUSY;
328         }
329
330         /* Acquiring a spinlock is needed to ensure that only a single thread
331          * is accessing the mailbox at a certain time.
332          */
333         OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
334
335         union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
336                           OFFSETOF(struct public_drv_mb, union_data);
337
338         if (p_mb_params->p_data_src != OSAL_NULL)
339                 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
340                                 p_mb_params->p_data_src,
341                                 sizeof(*p_mb_params->p_data_src));
342
343         rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
344                               p_mb_params->param, &p_mb_params->mcp_resp,
345                               &p_mb_params->mcp_param);
346
347         if (p_mb_params->p_data_dst != OSAL_NULL)
348                 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
349                                   union_data_addr,
350                                   sizeof(*p_mb_params->p_data_dst));
351         return rc;
352 }
353
354 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
355                                    struct ecore_ptt *p_ptt, u32 cmd, u32 param,
356                                    u32 *o_mcp_resp, u32 *o_mcp_param)
357 {
358         struct ecore_mcp_mb_params mb_params;
359         enum _ecore_status_t rc;
360
361 #ifndef ASIC_ONLY
362         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
363                 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
364                         loaded--;
365                         loaded_port[p_hwfn->port_id]--;
366                         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
367                                    loaded);
368                 }
369                 return ECORE_SUCCESS;
370         }
371 #endif
372         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
373         mb_params.cmd = cmd;
374         mb_params.param = param;
375         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
376         if (rc != ECORE_SUCCESS)
377                 return rc;
378
379         *o_mcp_resp = mb_params.mcp_resp;
380         *o_mcp_param = mb_params.mcp_param;
381
382         return ECORE_SUCCESS;
383 }
384
385 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
386                                           struct ecore_ptt *p_ptt,
387                                           u32 cmd,
388                                           u32 param,
389                                           u32 *o_mcp_resp,
390                                           u32 *o_mcp_param,
391                                           u32 i_txn_size, u32 *i_buf)
392 {
393         struct ecore_mcp_mb_params mb_params;
394         union drv_union_data union_data;
395         enum _ecore_status_t rc;
396
397         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
398         mb_params.cmd = cmd;
399         mb_params.param = param;
400         OSAL_MEMCPY(&union_data.raw_data, i_buf, i_txn_size);
401         mb_params.p_data_src = &union_data;
402         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
403         if (rc != ECORE_SUCCESS)
404                 return rc;
405
406         *o_mcp_resp = mb_params.mcp_resp;
407         *o_mcp_param = mb_params.mcp_param;
408
409         return ECORE_SUCCESS;
410 }
411
/* Issue an NVM read-type MCP command and copy the response payload from
 * the mailbox union_data area into o_buf.
 *
 * @param o_txn_size  Out: payload size in bytes (taken from the MFW's
 *                    mcp_param response).
 * @param o_buf       Out: caller-provided buffer; must be able to hold
 *                    *o_txn_size bytes rounded up to a whole dword.
 *
 * Returns ECORE_BUSY when the MCP is not initialized, otherwise the
 * status of the mailbox transaction.
 */
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 cmd,
                                          u32 param,
                                          u32 *o_mcp_resp,
                                          u32 *o_mcp_param,
                                          u32 *o_txn_size, u32 *o_buf)
{
	enum _ecore_status_t rc;
	u32 i;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
		return ECORE_BUSY;
	}

	/* Serialize mailbox access; held across the payload read so the
	 * union_data area cannot be overwritten by another command.
	 */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
			      o_mcp_param);
	if (rc != ECORE_SUCCESS)
		goto out;

	/* Get payload after operation completes successfully */
	*o_txn_size = *o_mcp_param;
	for (i = 0; i < *o_txn_size; i += 4)
		o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt,
						   union_data.raw_data[i]);

out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
	return rc;
}
445
446 #ifndef ASIC_ONLY
447 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
448                                     u32 *p_load_code)
449 {
450         static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
451
452         if (!loaded)
453                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
454         else if (!loaded_port[p_hwfn->port_id])
455                 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
456         else
457                 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
458
459         /* On CMT, always tell that it's engine */
460         if (p_hwfn->p_dev->num_hwfns > 1)
461                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
462
463         *p_load_code = load_phase;
464         loaded++;
465         loaded_port[p_hwfn->port_id]++;
466
467         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
468                    "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
469                    *p_load_code, loaded, p_hwfn->port_id,
470                    loaded_port[p_hwfn->port_id]);
471 }
472 #endif
473
474 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
475                                         struct ecore_ptt *p_ptt,
476                                         u32 *p_load_code)
477 {
478         struct ecore_dev *p_dev = p_hwfn->p_dev;
479         struct ecore_mcp_mb_params mb_params;
480         union drv_union_data union_data;
481         u32 param;
482         enum _ecore_status_t rc;
483
484 #ifndef ASIC_ONLY
485         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
486                 ecore_mcp_mf_workaround(p_hwfn, p_load_code);
487                 return ECORE_SUCCESS;
488         }
489 #endif
490
491         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
492         mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
493         mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
494                           p_dev->drv_type;
495         OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
496         mb_params.p_data_src = &union_data;
497         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
498
499         /* if mcp fails to respond we must abort */
500         if (rc != ECORE_SUCCESS) {
501                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
502                 return rc;
503         }
504
505         /* If MFW refused (e.g. other port is in diagnostic mode) we
506          * must abort. This can happen in the following cases:
507          * - Other port is in diagnostic mode
508          * - Previously loaded function on the engine is not compliant with
509          *   the requester.
510          * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
511          *      -
512          */
513         if (!(*p_load_code) ||
514             ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
515             ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
516             ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
517                 DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
518                 return ECORE_BUSY;
519         }
520
521         return ECORE_SUCCESS;
522 }
523
524 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
525                                     struct ecore_ptt *p_ptt)
526 {
527         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
528                                         PUBLIC_PATH);
529         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
530         u32 path_addr = SECTION_ADDR(mfw_path_offsize,
531                                      ECORE_PATH_ID(p_hwfn));
532         u32 disabled_vfs[VF_MAX_STATIC / 32];
533         int i;
534
535         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
536                    "Reading Disabled VF information from [offset %08x],"
537                    " path_addr %08x\n",
538                    mfw_path_offsize, path_addr);
539
540         for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
541                 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
542                                            path_addr +
543                                            OFFSETOF(struct public_path,
544                                                     mcp_vf_disabled) +
545                                            sizeof(u32) * i);
546                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
547                            "FLR-ed VFs [%08x,...,%08x] - %08x\n",
548                            i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
549         }
550
551         if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
552                 OSAL_VF_FLR_UPDATE(p_hwfn);
553 }
554
555 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
556                                           struct ecore_ptt *p_ptt,
557                                           u32 *vfs_to_ack)
558 {
559         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
560                                         PUBLIC_FUNC);
561         u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
562         u32 func_addr = SECTION_ADDR(mfw_func_offsize,
563                                      MCP_PF_ID(p_hwfn));
564         struct ecore_mcp_mb_params mb_params;
565         union drv_union_data union_data;
566         u32 resp, param;
567         enum _ecore_status_t rc;
568         int i;
569
570         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
571                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
572                            "Acking VFs [%08x,...,%08x] - %08x\n",
573                            i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
574
575         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
576         mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
577         OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
578         mb_params.p_data_src = &union_data;
579         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
580         if (rc != ECORE_SUCCESS) {
581                 DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
582                           "Failed to pass ACK for VF flr to MFW\n");
583                 return ECORE_TIMEOUT;
584         }
585
586         /* TMP - clear the ACK bits; should be done by MFW */
587         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
588                 ecore_wr(p_hwfn, p_ptt,
589                          func_addr +
590                          OFFSETOF(struct public_func, drv_ack_vf_disabled) +
591                          i * sizeof(u32), 0);
592
593         return rc;
594 }
595
596 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
597                                                 struct ecore_ptt *p_ptt)
598 {
599         u32 transceiver_state;
600
601         transceiver_state = ecore_rd(p_hwfn, p_ptt,
602                                      p_hwfn->mcp_info->port_addr +
603                                      OFFSETOF(struct public_port,
604                                               transceiver_data));
605
606         DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
607                    "Received transceiver state update [0x%08x] from mfw"
608                    " [Addr 0x%x]\n",
609                    transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
610                                             OFFSETOF(struct public_port,
611                                                      transceiver_data)));
612
613         transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE);
614
615         if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
616                 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
617         else
618                 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
619 }
620
/* Decode a link-status update from MFW shared memory into
 * p_hwfn->mcp_info->link_output, apply bandwidth-allocation corrections,
 * and notify the OS layer.
 *
 * @param b_reset  When true, only clear the cached link indications and
 *                 return without reading shmem.
 */
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt, bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	/* Start from a clean state on every update */
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					  OFFSETOF(struct public_port,
						   link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	/* Only report link-up if the driver requested the link itself */
	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	/* Decode negotiated speed/duplex */
	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store total line speed as p_link->speed is
	 * again changes according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	/* Correct speed according to bandwidth allocation */
	if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
		u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;

	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);
	}

	if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) {
		u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);

	/* Propagate the new minimal rate to per-VP WFQ configuration */
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
					      p_link->min_pf_rate);
	}

	/* Autoneg/flow-control indications */
	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					 LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	/* Build the partner advertised-speed bitmap */
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	/* HW workaround for the Eagle DCBX path on link-up */
	if (p_link->link_up)
		ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);

	OSAL_LINK_UPDATE(p_hwfn);
}
763
764 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
765                                         struct ecore_ptt *p_ptt, bool b_up)
766 {
767         struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
768         struct ecore_mcp_mb_params mb_params;
769         union drv_union_data union_data;
770         struct pmm_phy_cfg *p_phy_cfg;
771         u32 param = 0, reply = 0, cmd;
772         enum _ecore_status_t rc = ECORE_SUCCESS;
773
774 #ifndef ASIC_ONLY
775         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
776                 return ECORE_SUCCESS;
777 #endif
778
779         /* Set the shmem configuration according to params */
780         p_phy_cfg = &union_data.drv_phy_cfg;
781         OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
782         cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
783         if (!params->speed.autoneg)
784                 p_phy_cfg->speed = params->speed.forced_speed;
785         p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
786         p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
787         p_phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
788         p_phy_cfg->adv_speed = params->speed.advertised_speeds;
789         p_phy_cfg->loopback_mode = params->loopback_mode;
790
791 #ifndef ASIC_ONLY
792         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
793                 DP_INFO(p_hwfn,
794                         "Link on FPGA - Ask for loopback mode '5' at 10G\n");
795                 p_phy_cfg->loopback_mode = 5;
796                 p_phy_cfg->speed = 10000;
797         }
798 #endif
799
800         p_hwfn->b_drv_link_init = b_up;
801
802         if (b_up)
803                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
804                            "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
805                            " adv_speed 0x%08x, loopback 0x%08x,"
806                            " features 0x%08x\n",
807                            p_phy_cfg->speed, p_phy_cfg->pause,
808                            p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
809                            p_phy_cfg->feature_config_flags);
810         else
811                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
812
813         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
814         mb_params.cmd = cmd;
815         mb_params.p_data_src = &union_data;
816         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
817
818         /* if mcp fails to respond we must abort */
819         if (rc != ECORE_SUCCESS) {
820                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
821                 return rc;
822         }
823
824         /* Reset the link status if needed */
825         if (!b_up)
826                 ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
827
828         return rc;
829 }
830
831 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
832                                    struct ecore_ptt *p_ptt)
833 {
834         u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
835
836         /* TODO - Add support for VFs */
837         if (IS_VF(p_hwfn->p_dev))
838                 return ECORE_INVAL;
839
840         path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
841                                                  PUBLIC_PATH);
842         path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
843         path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
844
845         proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
846                                  path_addr +
847                                  OFFSETOF(struct public_path, process_kill)) &
848             PROCESS_KILL_COUNTER_MASK;
849
850         return proc_kill_cnt;
851 }
852
853 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
854                                           struct ecore_ptt *p_ptt)
855 {
856         struct ecore_dev *p_dev = p_hwfn->p_dev;
857         u32 proc_kill_cnt;
858
859         /* Prevent possible attentions/interrupts during the recovery handling
860          * and till its load phase, during which they will be re-enabled.
861          */
862         ecore_int_igu_disable_int(p_hwfn, p_ptt);
863
864         DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
865
866         /* The following operations should be done once, and thus in CMT mode
867          * are carried out by only the first HW function.
868          */
869         if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
870                 return;
871
872         if (p_dev->recov_in_prog) {
873                 DP_NOTICE(p_hwfn, false,
874                           "Ignoring the indication since a recovery"
875                           " process is already in progress\n");
876                 return;
877         }
878
879         p_dev->recov_in_prog = true;
880
881         proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
882         DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
883
884         OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
885 }
886
887 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
888                                           struct ecore_ptt *p_ptt,
889                                           enum MFW_DRV_MSG_TYPE type)
890 {
891         enum ecore_mcp_protocol_type stats_type;
892         union ecore_mcp_protocol_stats stats;
893         struct ecore_mcp_mb_params mb_params;
894         u32 hsi_param, param = 0, reply = 0;
895         union drv_union_data union_data;
896
897         switch (type) {
898         case MFW_DRV_MSG_GET_LAN_STATS:
899                 stats_type = ECORE_MCP_LAN_STATS;
900                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
901                 break;
902         default:
903                 DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
904                 return;
905         }
906
907         OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
908
909         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
910         mb_params.cmd = DRV_MSG_CODE_GET_STATS;
911         mb_params.param = hsi_param;
912         OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
913         mb_params.p_data_src = &union_data;
914         ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
915 }
916
917 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
918                                     struct ecore_ptt *p_ptt,
919                                     struct public_func *p_data, int pfid)
920 {
921         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
922                                         PUBLIC_FUNC);
923         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
924         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
925         u32 i, size;
926
927         OSAL_MEM_ZERO(p_data, sizeof(*p_data));
928
929         size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize));
930         for (i = 0; i < size / sizeof(u32); i++)
931                 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
932                                               func_addr + (i << 2));
933
934         return size;
935 }
936
937 static void
938 ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
939                         struct public_func *p_shmem_info)
940 {
941         struct ecore_mcp_function_info *p_info;
942
943         p_info = &p_hwfn->mcp_info->func_info;
944
945         /* TODO - bandwidth min/max should have valid values of 1-100,
946          * as well as some indication that the feature is disabled.
947          * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
948          * limit and correct value to min `1' and max `100' if limit isn't in
949          * range.
950          */
951         p_info->bandwidth_min = (p_shmem_info->config &
952                                  FUNC_MF_CFG_MIN_BW_MASK) >>
953             FUNC_MF_CFG_MIN_BW_SHIFT;
954         if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
955                 DP_INFO(p_hwfn,
956                         "bandwidth minimum out of bounds [%02x]. Set to 1\n",
957                         p_info->bandwidth_min);
958                 p_info->bandwidth_min = 1;
959         }
960
961         p_info->bandwidth_max = (p_shmem_info->config &
962                                  FUNC_MF_CFG_MAX_BW_MASK) >>
963             FUNC_MF_CFG_MAX_BW_SHIFT;
964         if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
965                 DP_INFO(p_hwfn,
966                         "bandwidth maximum out of bounds [%02x]. Set to 100\n",
967                         p_info->bandwidth_max);
968                 p_info->bandwidth_max = 100;
969         }
970 }
971
972 static void
973 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
974 {
975         struct ecore_mcp_function_info *p_info;
976         struct public_func shmem_info;
977         u32 resp = 0, param = 0;
978
979         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
980
981         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
982
983         p_info = &p_hwfn->mcp_info->func_info;
984
985         ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
986
987         ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
988
989         /* Acknowledge the MFW */
990         ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
991                       &param);
992 }
993
994 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
995                                          struct ecore_ptt *p_ptt)
996 {
997         /* A single notification should be sent to upper driver in CMT mode */
998         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
999                 return;
1000
1001         DP_NOTICE(p_hwfn, false,
1002                   "Fan failure was detected on the network interface card"
1003                   " and it's going to be shut down.\n");
1004
1005         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1006 }
1007
/**
 * @brief Process pending MFW -> driver notifications.
 *
 * Reads the MFW message mailbox, dispatches a handler for every message
 * slot whose value differs from the shadow copy, acknowledges all slots
 * back to the MFW, and finally refreshes the shadow.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return ECORE_SUCCESS, or ECORE_INVAL if an unimplemented message was
 *         seen or no new message was actually found.
 */
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *info = p_hwfn->mcp_info;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        bool found = false;
        u16 i;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

        /* Read Messages from MFW into info->mfw_mb_cur */
        ecore_mcp_read_mb(p_hwfn, p_ptt);

        /* Compare current messages to old ones; a changed byte means the
         * MFW posted a new message of that type (slot index == msg type).
         */
        for (i = 0; i < info->mfw_mb_length; i++) {
                if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
                        continue;

                found = true;

                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
                           i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

                /* Dispatch per message type; the slot index identifies it */
                switch (i) {
                case MFW_DRV_MSG_LINK_CHANGE:
                        ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
                        break;
                case MFW_DRV_MSG_VF_DISABLED:
                        ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_LLDP_DATA_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_REMOTE_LLDP_MIB);
                        break;
                case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_REMOTE_MIB);
                        break;
                case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_OPERATIONAL_MIB);
                        break;
                case MFW_DRV_MSG_ERROR_RECOVERY:
                        ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_GET_LAN_STATS:
                case MFW_DRV_MSG_GET_FCOE_STATS:
                case MFW_DRV_MSG_GET_ISCSI_STATS:
                case MFW_DRV_MSG_GET_RDMA_STATS:
                        ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
                        break;
                case MFW_DRV_MSG_BW_UPDATE:
                        ecore_mcp_update_bw(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
                        ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_FAILURE_DETECTED:
                        ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
                        break;
                default:
                        /* @DPDK */
                        DP_NOTICE(p_hwfn, false,
                                  "Unimplemented MFW message %d\n", i);
                        rc = ECORE_INVAL;
                }
        }

        /* ACK everything - the ACK area follows the message area in shmem
         * (mfw_mb_addr + length dword + MAX_DWORDS message dwords).
         */
        for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
                OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

                /* MFW expect answer in BE, so we force write in that format */
                ecore_wr(p_hwfn, p_ptt,
                         info->mfw_mb_addr + sizeof(u32) +
                         MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
                         sizeof(u32) + i * sizeof(u32), val);
        }

        if (!found) {
                DP_NOTICE(p_hwfn, false,
                          "Received an MFW message indication but no"
                          " new message!\n");
                rc = ECORE_INVAL;
        }

        /* Copy the new mfw messages into the shadow */
        OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

        return rc;
}
1100
1101 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
1102                                            struct ecore_ptt *p_ptt,
1103                                            u32 *p_mfw_ver,
1104                                            u32 *p_running_bundle_id)
1105 {
1106         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1107         u32 global_offsize;
1108
1109 #ifndef ASIC_ONLY
1110         if (CHIP_REV_IS_EMUL(p_dev)) {
1111                 DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n");
1112                 return ECORE_SUCCESS;
1113         }
1114 #endif
1115
1116         if (IS_VF(p_dev)) {
1117                 if (p_hwfn->vf_iov_info) {
1118                         struct pfvf_acquire_resp_tlv *p_resp;
1119
1120                         p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1121                         *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1122                         return ECORE_SUCCESS;
1123                 }
1124
1125                 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
1126                            "VF requested MFW vers prior to ACQUIRE\n");
1127                         return ECORE_INVAL;
1128                 }
1129
1130         global_offsize = ecore_rd(p_hwfn, p_ptt,
1131                                   SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1132                                                        public_base,
1133                                                        PUBLIC_GLOBAL));
1134         *p_mfw_ver =
1135             ecore_rd(p_hwfn, p_ptt,
1136                      SECTION_ADDR(global_offsize,
1137                                   0) + OFFSETOF(struct public_global, mfw_ver));
1138
1139         if (p_running_bundle_id != OSAL_NULL) {
1140                 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1141                                                 SECTION_ADDR(global_offsize,
1142                                                              0) +
1143                                                 OFFSETOF(struct public_global,
1144                                                          running_bundle_id));
1145         }
1146
1147         return ECORE_SUCCESS;
1148 }
1149
1150 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1151                                               u32 *p_media_type)
1152 {
1153         struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1154         struct ecore_ptt *p_ptt;
1155
1156         /* TODO - Add support for VFs */
1157         if (IS_VF(p_dev))
1158                 return ECORE_INVAL;
1159
1160         if (!ecore_mcp_is_init(p_hwfn)) {
1161                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
1162                 return ECORE_BUSY;
1163         }
1164
1165         *p_media_type = MEDIA_UNSPECIFIED;
1166
1167         p_ptt = ecore_ptt_acquire(p_hwfn);
1168         if (!p_ptt)
1169                 return ECORE_BUSY;
1170
1171         *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1172                                  OFFSETOF(struct public_port, media_type));
1173
1174         ecore_ptt_release(p_hwfn, p_ptt);
1175
1176         return ECORE_SUCCESS;
1177 }
1178
1179 static enum _ecore_status_t
1180 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1181                           struct public_func *p_info,
1182                           enum ecore_pci_personality *p_proto)
1183 {
1184         enum _ecore_status_t rc = ECORE_SUCCESS;
1185
1186         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1187         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1188                 *p_proto = ECORE_PCI_ETH;
1189                 break;
1190         default:
1191                 rc = ECORE_INVAL;
1192         }
1193
1194         return rc;
1195 }
1196
1197 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1198                                                     struct ecore_ptt *p_ptt)
1199 {
1200         struct ecore_mcp_function_info *info;
1201         struct public_func shmem_info;
1202
1203         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1204         info = &p_hwfn->mcp_info->func_info;
1205
1206         info->pause_on_host = (shmem_info.config &
1207                                FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1208
1209         if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
1210                 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1211                        (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1212                 return ECORE_INVAL;
1213         }
1214
1215         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1216
1217         if (shmem_info.mac_upper || shmem_info.mac_lower) {
1218                 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1219                 info->mac[1] = (u8)(shmem_info.mac_upper);
1220                 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1221                 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1222                 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1223                 info->mac[5] = (u8)(shmem_info.mac_lower);
1224         } else {
1225                 /* TODO - are there protocols for which there's no MAC? */
1226                 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1227         }
1228
1229         info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1230
1231         DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1232                    "Read configuration from shmem: pause_on_host %02x"
1233                     " protocol %02x BW [%02x - %02x]"
1234                     " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %" PRIx64
1235                     " node %" PRIx64 " ovlan %04x\n",
1236                    info->pause_on_host, info->protocol,
1237                    info->bandwidth_min, info->bandwidth_max,
1238                    info->mac[0], info->mac[1], info->mac[2],
1239                    info->mac[3], info->mac[4], info->mac[5],
1240                    info->wwn_port, info->wwn_node, info->ovlan);
1241
1242         return ECORE_SUCCESS;
1243 }
1244
1245 struct ecore_mcp_link_params
1246 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1247 {
1248         if (!p_hwfn || !p_hwfn->mcp_info)
1249                 return OSAL_NULL;
1250         return &p_hwfn->mcp_info->link_input;
1251 }
1252
1253 struct ecore_mcp_link_state
1254 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1255 {
1256         if (!p_hwfn || !p_hwfn->mcp_info)
1257                 return OSAL_NULL;
1258
1259 #ifndef ASIC_ONLY
1260         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1261                 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1262                 p_hwfn->mcp_info->link_output.link_up = true;
1263         }
1264 #endif
1265
1266         return &p_hwfn->mcp_info->link_output;
1267 }
1268
1269 struct ecore_mcp_link_capabilities
1270 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1271 {
1272         if (!p_hwfn || !p_hwfn->mcp_info)
1273                 return OSAL_NULL;
1274         return &p_hwfn->mcp_info->link_capabilities;
1275 }
1276
1277 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1278                                      struct ecore_ptt *p_ptt)
1279 {
1280         enum _ecore_status_t rc;
1281         u32 resp = 0, param = 0;
1282
1283         rc = ecore_mcp_cmd(p_hwfn, p_ptt,
1284                            DRV_MSG_CODE_NIG_DRAIN, 100, &resp, &param);
1285
1286         /* Wait for the drain to complete before returning */
1287         OSAL_MSLEEP(120);
1288
1289         return rc;
1290 }
1291
1292 const struct ecore_mcp_function_info
1293 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1294 {
1295         if (!p_hwfn || !p_hwfn->mcp_info)
1296                 return OSAL_NULL;
1297         return &p_hwfn->mcp_info->func_info;
1298 }
1299
1300 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1301                                            struct ecore_ptt *p_ptt,
1302                                            struct ecore_mcp_nvm_params *params)
1303 {
1304         enum _ecore_status_t rc;
1305
1306         switch (params->type) {
1307         case ECORE_MCP_NVM_RD:
1308                 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1309                                           params->nvm_common.offset,
1310                                           &params->nvm_common.resp,
1311                                           &params->nvm_common.param,
1312                                           params->nvm_rd.buf_size,
1313                                           params->nvm_rd.buf);
1314                 break;
1315         case ECORE_MCP_CMD:
1316                 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1317                                    params->nvm_common.offset,
1318                                    &params->nvm_common.resp,
1319                                    &params->nvm_common.param);
1320                 break;
1321         case ECORE_MCP_NVM_WR:
1322                 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1323                                           params->nvm_common.offset,
1324                                           &params->nvm_common.resp,
1325                                           &params->nvm_common.param,
1326                                           params->nvm_wr.buf_size,
1327                                           params->nvm_wr.buf);
1328                 break;
1329         default:
1330                 rc = ECORE_NOTIMPL;
1331                 break;
1332         }
1333         return rc;
1334 }
1335
1336 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1337                                   struct ecore_ptt *p_ptt, u32 personalities)
1338 {
1339         enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1340         struct public_func shmem_info;
1341         int i, count = 0, num_pfs;
1342
1343         num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1344
1345         for (i = 0; i < num_pfs; i++) {
1346                 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1347                                          MCP_PF_ID_BY_REL(p_hwfn, i));
1348                 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1349                         continue;
1350
1351                 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
1352                                               &protocol) != ECORE_SUCCESS)
1353                         continue;
1354
1355                 if ((1 << ((u32)protocol)) & personalities)
1356                         count++;
1357         }
1358
1359         return count;
1360 }
1361
1362 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1363                                               struct ecore_ptt *p_ptt,
1364                                               u32 *p_flash_size)
1365 {
1366         u32 flash_size;
1367
1368 #ifndef ASIC_ONLY
1369         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1370                 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
1371                 return ECORE_INVAL;
1372         }
1373 #endif
1374
1375         if (IS_VF(p_hwfn->p_dev))
1376                 return ECORE_INVAL;
1377
1378         flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1379         flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1380             MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1381         flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1382
1383         *p_flash_size = flash_size;
1384
1385         return ECORE_SUCCESS;
1386 }
1387
1388 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1389                                                   struct ecore_ptt *p_ptt)
1390 {
1391         struct ecore_dev *p_dev = p_hwfn->p_dev;
1392
1393         if (p_dev->recov_in_prog) {
1394                 DP_NOTICE(p_hwfn, false,
1395                           "Avoid triggering a recovery since such a process"
1396                           " is already in progress\n");
1397                 return ECORE_AGAIN;
1398         }
1399
1400         DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
1401         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1402
1403         return ECORE_SUCCESS;
1404 }
1405
1406 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1407                                               struct ecore_ptt *p_ptt,
1408                                               u8 vf_id, u8 num)
1409 {
1410         u32 resp = 0, param = 0, rc_param = 0;
1411         enum _ecore_status_t rc;
1412
1413         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1414             DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1415         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1416             DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1417
1418         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1419                            &resp, &rc_param);
1420
1421         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1422                 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
1423                           vf_id);
1424                 rc = ECORE_INVAL;
1425         }
1426
1427         return rc;
1428 }
1429
1430 enum _ecore_status_t
1431 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1432                            struct ecore_mcp_drv_version *p_ver)
1433 {
1434         u32 param = 0, reply = 0, num_words, i;
1435         struct drv_version_stc *p_drv_version;
1436         struct ecore_mcp_mb_params mb_params;
1437         union drv_union_data union_data;
1438         void *p_name;
1439         OSAL_BE32 val;
1440         enum _ecore_status_t rc;
1441
1442 #ifndef ASIC_ONLY
1443         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
1444                 return ECORE_SUCCESS;
1445 #endif
1446
1447         p_drv_version = &union_data.drv_version;
1448         p_drv_version->version = p_ver->version;
1449         num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
1450         for (i = 0; i < num_words; i++) {
1451                 p_name = &p_ver->name[i * sizeof(u32)];
1452                 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
1453                 *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
1454         }
1455
1456         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1457         mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
1458         mb_params.p_data_src = &union_data;
1459         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1460         if (rc != ECORE_SUCCESS)
1461                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1462
1463         return rc;
1464 }
1465
1466 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
1467                                     struct ecore_ptt *p_ptt)
1468 {
1469         enum _ecore_status_t rc;
1470         u32 resp = 0, param = 0;
1471
1472         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
1473                            &param);
1474         if (rc != ECORE_SUCCESS)
1475                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1476
1477         return rc;
1478 }
1479
1480 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
1481                                       struct ecore_ptt *p_ptt)
1482 {
1483         u32 value, cpu_mode;
1484
1485         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
1486
1487         value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1488         value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
1489         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
1490         cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1491
1492         return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
1493 }
1494
1495 enum _ecore_status_t
1496 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
1497                                    struct ecore_ptt *p_ptt,
1498                                    enum ecore_ov_config_method config,
1499                                    enum ecore_ov_client client)
1500 {
1501         enum _ecore_status_t rc;
1502         u32 resp = 0, param = 0;
1503         u32 drv_mb_param;
1504
1505         switch (config) {
1506         case ECORE_OV_CLIENT_DRV:
1507                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
1508                 break;
1509         case ECORE_OV_CLIENT_USER:
1510                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
1511                 break;
1512         default:
1513                 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config);
1514                 return ECORE_INVAL;
1515         }
1516
1517         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
1518                            drv_mb_param, &resp, &param);
1519         if (rc != ECORE_SUCCESS)
1520                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1521
1522         return rc;
1523 }
1524
1525 enum _ecore_status_t
1526 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
1527                                  struct ecore_ptt *p_ptt,
1528                                  enum ecore_ov_driver_state drv_state)
1529 {
1530         enum _ecore_status_t rc;
1531         u32 resp = 0, param = 0;
1532         u32 drv_mb_param;
1533
1534         switch (drv_state) {
1535         case ECORE_OV_DRIVER_STATE_NOT_LOADED:
1536                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
1537                 break;
1538         case ECORE_OV_DRIVER_STATE_DISABLED:
1539                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
1540                 break;
1541         case ECORE_OV_DRIVER_STATE_ACTIVE:
1542                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
1543                 break;
1544         default:
1545                 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
1546                 return ECORE_INVAL;
1547         }
1548
1549         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
1550                            drv_state, &resp, &param);
1551         if (rc != ECORE_SUCCESS)
1552                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1553
1554         return rc;
1555 }
1556
1557 enum _ecore_status_t
1558 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1559                          struct ecore_fc_npiv_tbl *p_table)
1560 {
1561         return 0;
1562 }
1563
1564 enum _ecore_status_t
1565 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
1566                         struct ecore_ptt *p_ptt, u16 mtu)
1567 {
1568         return 0;
1569 }
1570
1571 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
1572                                        struct ecore_ptt *p_ptt,
1573                                        enum ecore_led_mode mode)
1574 {
1575         u32 resp = 0, param = 0, drv_mb_param;
1576         enum _ecore_status_t rc;
1577
1578         switch (mode) {
1579         case ECORE_LED_MODE_ON:
1580                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
1581                 break;
1582         case ECORE_LED_MODE_OFF:
1583                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
1584                 break;
1585         case ECORE_LED_MODE_RESTORE:
1586                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
1587                 break;
1588         default:
1589                 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
1590                 return ECORE_INVAL;
1591         }
1592
1593         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
1594                            drv_mb_param, &resp, &param);
1595         if (rc != ECORE_SUCCESS)
1596                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1597
1598         return rc;
1599 }
1600
1601 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
1602                                              struct ecore_ptt *p_ptt,
1603                                              u32 mask_parities)
1604 {
1605         enum _ecore_status_t rc;
1606         u32 resp = 0, param = 0;
1607
1608         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
1609                            mask_parities, &resp, &param);
1610
1611         if (rc != ECORE_SUCCESS) {
1612                 DP_ERR(p_hwfn,
1613                        "MCP response failure for mask parities, aborting\n");
1614         } else if (resp != FW_MSG_CODE_OK) {
1615                 DP_ERR(p_hwfn,
1616                        "MCP did not ack mask parity request. Old MFW?\n");
1617                 rc = ECORE_INVAL;
1618         }
1619
1620         return rc;
1621 }
1622
1623 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
1624                                         u8 *p_buf, u32 len)
1625 {
1626         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1627         u32 bytes_left, offset, bytes_to_copy, buf_size;
1628         struct ecore_mcp_nvm_params params;
1629         struct ecore_ptt *p_ptt;
1630         enum _ecore_status_t rc = ECORE_SUCCESS;
1631
1632         p_ptt = ecore_ptt_acquire(p_hwfn);
1633         if (!p_ptt)
1634                 return ECORE_BUSY;
1635
1636         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1637         bytes_left = len;
1638         offset = 0;
1639         params.type = ECORE_MCP_NVM_RD;
1640         params.nvm_rd.buf_size = &buf_size;
1641         params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
1642         while (bytes_left > 0) {
1643                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
1644                                            MCP_DRV_NVM_BUF_LEN);
1645                 params.nvm_common.offset = (addr + offset) |
1646                     (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
1647                 params.nvm_rd.buf = (u32 *)(p_buf + offset);
1648                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1649                 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
1650                                             FW_MSG_CODE_NVM_OK)) {
1651                         DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1652                         break;
1653                 }
1654                 offset += *params.nvm_rd.buf_size;
1655                 bytes_left -= *params.nvm_rd.buf_size;
1656         }
1657
1658         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1659         ecore_ptt_release(p_hwfn, p_ptt);
1660
1661         return rc;
1662 }
1663
1664 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
1665                                         u32 addr, u8 *p_buf, u32 len)
1666 {
1667         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1668         struct ecore_mcp_nvm_params params;
1669         struct ecore_ptt *p_ptt;
1670         enum _ecore_status_t rc;
1671
1672         p_ptt = ecore_ptt_acquire(p_hwfn);
1673         if (!p_ptt)
1674                 return ECORE_BUSY;
1675
1676         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1677         params.type = ECORE_MCP_NVM_RD;
1678         params.nvm_rd.buf_size = &len;
1679         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
1680             DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
1681         params.nvm_common.offset = addr;
1682         params.nvm_rd.buf = (u32 *)p_buf;
1683         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1684         if (rc != ECORE_SUCCESS)
1685                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1686
1687         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1688         ecore_ptt_release(p_hwfn, p_ptt);
1689
1690         return rc;
1691 }
1692
1693 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
1694 {
1695         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1696         struct ecore_mcp_nvm_params params;
1697         struct ecore_ptt *p_ptt;
1698
1699         p_ptt = ecore_ptt_acquire(p_hwfn);
1700         if (!p_ptt)
1701                 return ECORE_BUSY;
1702
1703         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1704         OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
1705         ecore_ptt_release(p_hwfn, p_ptt);
1706
1707         return ECORE_SUCCESS;
1708 }
1709
1710 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
1711 {
1712         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1713         struct ecore_mcp_nvm_params params;
1714         struct ecore_ptt *p_ptt;
1715         enum _ecore_status_t rc;
1716
1717         p_ptt = ecore_ptt_acquire(p_hwfn);
1718         if (!p_ptt)
1719                 return ECORE_BUSY;
1720         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1721         params.type = ECORE_MCP_CMD;
1722         params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
1723         params.nvm_common.offset = addr;
1724         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1725         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1726         ecore_ptt_release(p_hwfn, p_ptt);
1727
1728         return rc;
1729 }
1730
1731 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
1732                                                   u32 addr)
1733 {
1734         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1735         struct ecore_mcp_nvm_params params;
1736         struct ecore_ptt *p_ptt;
1737         enum _ecore_status_t rc;
1738
1739         p_ptt = ecore_ptt_acquire(p_hwfn);
1740         if (!p_ptt)
1741                 return ECORE_BUSY;
1742         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1743         params.type = ECORE_MCP_CMD;
1744         params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
1745         params.nvm_common.offset = addr;
1746         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1747         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1748         ecore_ptt_release(p_hwfn, p_ptt);
1749
1750         return rc;
1751 }
1752
1753 /* rc receives ECORE_INVAL as default parameter because
1754  * it might not enter the while loop if the len is 0
1755  */
1756 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
1757                                          u32 addr, u8 *p_buf, u32 len)
1758 {
1759         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1760         enum _ecore_status_t rc = ECORE_INVAL;
1761         struct ecore_mcp_nvm_params params;
1762         struct ecore_ptt *p_ptt;
1763         u32 buf_idx, buf_size;
1764
1765         p_ptt = ecore_ptt_acquire(p_hwfn);
1766         if (!p_ptt)
1767                 return ECORE_BUSY;
1768
1769         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1770         params.type = ECORE_MCP_NVM_WR;
1771         if (cmd == ECORE_PUT_FILE_DATA)
1772                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
1773         else
1774                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
1775         buf_idx = 0;
1776         while (buf_idx < len) {
1777                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
1778                                       MCP_DRV_NVM_BUF_LEN);
1779                 params.nvm_common.offset = ((buf_size <<
1780                                              DRV_MB_PARAM_NVM_LEN_SHIFT)
1781                                             | addr) + buf_idx;
1782                 params.nvm_wr.buf_size = buf_size;
1783                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
1784                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1785                 if (rc != ECORE_SUCCESS ||
1786                     ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
1787                      (params.nvm_common.resp !=
1788                       FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
1789                         DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1790
1791                 buf_idx += buf_size;
1792         }
1793
1794         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1795         ecore_ptt_release(p_hwfn, p_ptt);
1796
1797         return rc;
1798 }
1799
1800 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
1801                                          u32 addr, u8 *p_buf, u32 len)
1802 {
1803         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1804         struct ecore_mcp_nvm_params params;
1805         struct ecore_ptt *p_ptt;
1806         enum _ecore_status_t rc;
1807
1808         p_ptt = ecore_ptt_acquire(p_hwfn);
1809         if (!p_ptt)
1810                 return ECORE_BUSY;
1811
1812         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1813         params.type = ECORE_MCP_NVM_WR;
1814         params.nvm_wr.buf_size = len;
1815         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
1816             DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
1817         params.nvm_common.offset = addr;
1818         params.nvm_wr.buf = (u32 *)p_buf;
1819         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1820         if (rc != ECORE_SUCCESS)
1821                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1822         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1823         ecore_ptt_release(p_hwfn, p_ptt);
1824
1825         return rc;
1826 }
1827
1828 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
1829                                                    u32 addr)
1830 {
1831         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1832         struct ecore_mcp_nvm_params params;
1833         struct ecore_ptt *p_ptt;
1834         enum _ecore_status_t rc;
1835
1836         p_ptt = ecore_ptt_acquire(p_hwfn);
1837         if (!p_ptt)
1838                 return ECORE_BUSY;
1839
1840         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1841         params.type = ECORE_MCP_CMD;
1842         params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
1843         params.nvm_common.offset = addr;
1844         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1845         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1846         ecore_ptt_release(p_hwfn, p_ptt);
1847
1848         return rc;
1849 }
1850
1851 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
1852                                             struct ecore_ptt *p_ptt,
1853                                             u32 port, u32 addr, u32 offset,
1854                                             u32 len, u8 *p_buf)
1855 {
1856         struct ecore_mcp_nvm_params params;
1857         enum _ecore_status_t rc;
1858         u32 bytes_left, bytes_to_copy, buf_size;
1859
1860         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1861         SET_FIELD(params.nvm_common.offset,
1862                   DRV_MB_PARAM_TRANSCEIVER_PORT, port);
1863         SET_FIELD(params.nvm_common.offset,
1864                   DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
1865         addr = offset;
1866         offset = 0;
1867         bytes_left = len;
1868         params.type = ECORE_MCP_NVM_RD;
1869         params.nvm_rd.buf_size = &buf_size;
1870         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
1871         while (bytes_left > 0) {
1872                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
1873                                            MAX_I2C_TRANSACTION_SIZE);
1874                 params.nvm_rd.buf = (u32 *)(p_buf + offset);
1875                 SET_FIELD(params.nvm_common.offset,
1876                           DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset);
1877                 SET_FIELD(params.nvm_common.offset,
1878                           DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy);
1879                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1880                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
1881                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
1882                         return ECORE_NODEV;
1883                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
1884                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
1885                         return ECORE_UNKNOWN_ERROR;
1886
1887                 offset += *params.nvm_rd.buf_size;
1888                 bytes_left -= *params.nvm_rd.buf_size;
1889         }
1890
1891         return ECORE_SUCCESS;
1892 }
1893
1894 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
1895                                              struct ecore_ptt *p_ptt,
1896                                              u32 port, u32 addr, u32 offset,
1897                                              u32 len, u8 *p_buf)
1898 {
1899         struct ecore_mcp_nvm_params params;
1900         enum _ecore_status_t rc;
1901         u32 buf_idx, buf_size;
1902
1903         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1904         SET_FIELD(params.nvm_common.offset,
1905                   DRV_MB_PARAM_TRANSCEIVER_PORT, port);
1906         SET_FIELD(params.nvm_common.offset,
1907                   DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
1908         params.type = ECORE_MCP_NVM_WR;
1909         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
1910         buf_idx = 0;
1911         while (buf_idx < len) {
1912                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
1913                                       MAX_I2C_TRANSACTION_SIZE);
1914                 SET_FIELD(params.nvm_common.offset,
1915                           DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx);
1916                 SET_FIELD(params.nvm_common.offset,
1917                           DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size);
1918                 params.nvm_wr.buf_size = buf_size;
1919                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
1920                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1921                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
1922                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
1923                         return ECORE_NODEV;
1924                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
1925                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
1926                         return ECORE_UNKNOWN_ERROR;
1927
1928                 buf_idx += buf_size;
1929         }
1930
1931         return ECORE_SUCCESS;
1932 }
1933
1934 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
1935                                          struct ecore_ptt *p_ptt,
1936                                          u16 gpio, u32 *gpio_val)
1937 {
1938         enum _ecore_status_t rc = ECORE_SUCCESS;
1939         u32 drv_mb_param = 0, rsp;
1940
1941         SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
1942
1943         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
1944                            drv_mb_param, &rsp, gpio_val);
1945
1946         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
1947                 return ECORE_UNKNOWN_ERROR;
1948
1949         return ECORE_SUCCESS;
1950 }
1951
1952 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
1953                                           struct ecore_ptt *p_ptt,
1954                                           u16 gpio, u16 gpio_val)
1955 {
1956         enum _ecore_status_t rc = ECORE_SUCCESS;
1957         u32 drv_mb_param = 0, param, rsp;
1958
1959         SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
1960         SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val);
1961
1962         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
1963                            drv_mb_param, &rsp, &param);
1964
1965         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
1966                 return ECORE_UNKNOWN_ERROR;
1967
1968         return ECORE_SUCCESS;
1969 }