net/qede/base: update formatting and comments
[dpdk.git] / drivers / net / qede / base / ecore_mcp.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_iov_api.h"
19 #include "ecore_gtt_reg_addr.h"
20 #include "ecore_iro.h"
21 #include "ecore_dcbx.h"
22
23 #define CHIP_MCP_RESP_ITER_US 10
24 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
25
26 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)   /* Account for 5 sec */
27 #define ECORE_MCP_RESET_RETRIES (50 * 1000)     /* Account for 500 msec */
28
29 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
30         ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
31                  _val)
32
33 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
34         ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
35
36 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
37         DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
38                      OFFSETOF(struct public_drv_mb, _field), _val)
39
40 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
41         DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
42                      OFFSETOF(struct public_drv_mb, _field))
43
44 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
45         DRV_ID_PDA_COMP_VER_SHIFT)
46
47 #define MCP_BYTES_PER_MBIT_SHIFT 17
48
49 #ifndef ASIC_ONLY
50 static int loaded;
51 static int loaded_port[MAX_NUM_PORTS] = { 0 };
52 #endif
53
54 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
55 {
56         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
57                 return false;
58         return true;
59 }
60
61 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
62 {
63         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
64                                         PUBLIC_PORT);
65         u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
66
67         p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
68                                                    MFW_PORT(p_hwfn));
69         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
70                    "port_addr = 0x%x, port_id 0x%02x\n",
71                    p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
72 }
73
74 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
75 {
76         u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
77         OSAL_BE32 tmp;
78         u32 i;
79
80 #ifndef ASIC_ONLY
81         if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
82                 return;
83 #endif
84
85         if (!p_hwfn->mcp_info->public_base)
86                 return;
87
88         for (i = 0; i < length; i++) {
89                 tmp = ecore_rd(p_hwfn, p_ptt,
90                                p_hwfn->mcp_info->mfw_mb_addr +
91                                (i << 2) + sizeof(u32));
92
93                 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
94                     OSAL_BE32_TO_CPU(tmp);
95         }
96 }
97
98 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
99 {
100         if (p_hwfn->mcp_info) {
101                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
102                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
103                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
104         }
105         OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
106         p_hwfn->mcp_info = OSAL_NULL;
107
108         return ECORE_SUCCESS;
109 }
110
/* Discover the MFW shared-memory layout and cache the driver/MFW mailbox
 * addresses plus the initial sequence numbers in mcp_info.
 *
 * @return ECORE_SUCCESS on success, or ECORE_INVAL when no MFW is present
 *         (shared memory base not published - e.g. emulation).
 */
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
                p_info->public_base = 0;
                return ECORE_INVAL;
        }
#endif

        /* The MFW publishes the shmem base here; zero means no/dead MFW */
        p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return ECORE_INVAL;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
                   " mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address; its first dword holds the MB length */
        mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
                                               p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
            DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
            DRV_PULSE_SEQ_MASK;

        /* Snapshot the MCP history counter; a later change indicates the
         * MCP was reset and the offsets must be re-read.
         */
        p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
                                          MISCS_REG_GENERIC_POR_0);

        return ECORE_SUCCESS;
}
165
166 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
167                                         struct ecore_ptt *p_ptt)
168 {
169         struct ecore_mcp_info *p_info;
170         u32 size;
171
172         /* Allocate mcp_info structure */
173         p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
174                                        sizeof(*p_hwfn->mcp_info));
175         if (!p_hwfn->mcp_info)
176                 goto err;
177         p_info = p_hwfn->mcp_info;
178
179         if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
180                 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
181                 /* Do not free mcp_info here, since public_base indicate that
182                  * the MCP is not initialized
183                  */
184                 return ECORE_SUCCESS;
185         }
186
187         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
188         p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189         p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
190         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
191                 goto err;
192
193         /* Initialize the MFW spinlock */
194         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
195         OSAL_SPIN_LOCK_INIT(&p_info->lock);
196
197         return ECORE_SUCCESS;
198
199 err:
200         DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
201         ecore_mcp_free(p_hwfn);
202         return ECORE_NOMEM;
203 }
204
/* Ask the MFW to reset the MCP, then poll the MCP history register for
 * the change that indicates the reset took place.
 *
 * @return ECORE_SUCCESS when the reset was observed, ECORE_AGAIN when the
 *         MFW did not react within the retry budget.
 */
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
        u32 delay = CHIP_MCP_RESP_ITER_US;
        u32 org_mcp_reset_seq, cnt = 0;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        /* Emulation is far slower - use a much longer poll interval */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
#endif

        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

        /* Set drv command along with the updated sequence */
        org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                /* Give the FW up to 500 msec (50*1000 iterations of 10usec) */
        } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
                                                MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < ECORE_MCP_RESET_RETRIES));

        /* A change in the history (POR) register means the MCP did reset */
        if (org_mcp_reset_seq !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = ECORE_AGAIN;
        }

        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);

        return rc;
}
245
/* Perform one driver->MFW mailbox transaction: write param and command
 * (tagged with an incremented sequence number), then poll the FW mailbox
 * header until the matching sequence appears or the retry budget runs out.
 * Should be called while the dedicated spinlock is acquired.
 *
 * @return ECORE_SUCCESS with *o_mcp_resp/*o_mcp_param filled in, or
 *         ECORE_AGAIN when the MFW never echoed our sequence number.
 */
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt,
                                             u32 cmd, u32 param,
                                             u32 *o_mcp_resp,
                                             u32 *o_mcp_param)
{
        u32 delay = CHIP_MCP_RESP_ITER_US;
        u32 seq, cnt = 1, actual_mb_seq;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
#endif

        /* Get actual driver mailbox sequence.
         * NOTE(review): actual_mb_seq is read but never used afterwards.
         */
        actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
            DRV_MSG_SEQ_NUMBER_MASK;

        /* Use MCP history register to check if MCP reset occurred between
         * init time and now.
         */
        if (p_hwfn->mcp_info->mcp_hist !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
                ecore_load_mcp_offsets(p_hwfn, p_ptt);
                ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
        seq = ++p_hwfn->mcp_info->drv_mb_seq;

        /* Set drv param */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

        /* Set drv command along with the updated sequence */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "wrote command (%x) to MFW MB param 0x%08x\n",
                   (cmd | seq), param);

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

                /* Give the FW up to 5 sec (500*1000 iterations of 10usec) */
        } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
                 (cnt++ < ECORE_DRV_MB_MAX_RETRIES));

        /* NOTE(review): cnt * delay is in usec although the string says ms */
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "[after %d ms] read (%x) seq is (%x) from FW MB\n",
                   cnt * delay, *o_mcp_resp, seq);

        /* Is this a reply to our command? */
        if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
                *o_mcp_resp &= FW_MSG_CODE_MASK;
                /* Get the MCP param */
                *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
        } else {
                /* FW BUG! */
                DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
                       cmd, param);
                *o_mcp_resp = 0;
                rc = ECORE_AGAIN;
                ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
        }
        return rc;
}
315
316 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
317                                    struct ecore_ptt *p_ptt, u32 cmd, u32 param,
318                                    u32 *o_mcp_resp, u32 *o_mcp_param)
319 {
320 #ifndef ASIC_ONLY
321         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
322                 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
323                         loaded--;
324                         loaded_port[p_hwfn->port_id]--;
325                         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
326                                    loaded);
327                 }
328                 return ECORE_SUCCESS;
329         }
330 #endif
331
332         return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, OSAL_NULL,
333                                        o_mcp_resp, o_mcp_param);
334 }
335
336 enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
337                         struct ecore_ptt *p_ptt,
338                                              u32 cmd, u32 param,
339                                              union drv_union_data *p_union_data,
340                                              u32 *o_mcp_resp,
341                                              u32 *o_mcp_param)
342 {
343         u32 union_data_addr;
344         enum _ecore_status_t rc;
345
346         /* MCP not initialized */
347         if (!ecore_mcp_is_init(p_hwfn)) {
348                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
349                 return ECORE_BUSY;
350         }
351
352         /* Acquiring a spinlock is needed to ensure that only a single thread
353          * is accessing the mailbox at a certain time.
354          */
355         OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
356
357         if (p_union_data != OSAL_NULL) {
358         union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
359                           OFFSETOF(struct public_drv_mb, union_data);
360                 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, p_union_data,
361                                 sizeof(*p_union_data));
362 }
363
364         rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
365                               o_mcp_param);
366
367         OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
368
369                 return rc;
370 }
371
372 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
373                                           struct ecore_ptt *p_ptt,
374                                           u32 cmd,
375                                           u32 param,
376                                           u32 *o_mcp_resp,
377                                           u32 *o_mcp_param,
378                                           u32 i_txn_size, u32 *i_buf)
379 {
380         union drv_union_data union_data;
381
382         OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
383
384         return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, &union_data,
385                                        o_mcp_resp, o_mcp_param);
386 }
387
388 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
389                                           struct ecore_ptt *p_ptt,
390                                           u32 cmd,
391                                           u32 param,
392                                           u32 *o_mcp_resp,
393                                           u32 *o_mcp_param,
394                                           u32 *o_txn_size, u32 *o_buf)
395 {
396         enum _ecore_status_t rc;
397         u32 i;
398
399         /* MCP not initialized */
400         if (!ecore_mcp_is_init(p_hwfn)) {
401                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
402                 return ECORE_BUSY;
403         }
404
405         OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
406         rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
407                               o_mcp_param);
408         if (rc != ECORE_SUCCESS)
409                 goto out;
410
411         /* Get payload after operation completes successfully */
412         *o_txn_size = *o_mcp_param;
413         for (i = 0; i < *o_txn_size; i += 4)
414                 o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt,
415                                                    union_data.raw_data[i]);
416
417 out:
418         OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
419         return rc;
420 }
421
422 #ifndef ASIC_ONLY
423 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
424                                     u32 *p_load_code)
425 {
426         static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
427
428         if (!loaded)
429                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
430         else if (!loaded_port[p_hwfn->port_id])
431                 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
432         else
433                 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
434
435         /* On CMT, always tell that it's engine */
436         if (p_hwfn->p_dev->num_hwfns > 1)
437                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
438
439         *p_load_code = load_phase;
440         loaded++;
441         loaded_port[p_hwfn->port_id]++;
442
443         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
444                    "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
445                    *p_load_code, loaded, p_hwfn->port_id,
446                    loaded_port[p_hwfn->port_id]);
447 }
448 #endif
449
/* Send LOAD_REQ to the MFW and return the assigned load phase
 * (engine/port/function) through p_load_code.
 *
 * @return ECORE_SUCCESS on an accepted request, ECORE_BUSY when the MFW
 *         refused it, or the mailbox error when the MFW did not respond.
 */
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        u32 *p_load_code)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        union drv_union_data union_data;
        u32 param;
        enum _ecore_status_t rc;

#ifndef ASIC_ONLY
        /* No MFW on emulation - fake the handshake locally */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                ecore_mcp_mf_workaround(p_hwfn, p_load_code);
                return ECORE_SUCCESS;
        }
#endif

        /* The request payload is the driver's version string */
        OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
                                     (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
                                      p_dev->drv_type),
                                     &union_data, p_load_code, &param);

        /* if mcp fails to respond we must abort */
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* If MFW refused we must abort. This can happen in the following
         * cases:
         * - Other port is in diagnostic mode
         * - Previously loaded function on the engine is not compliant with
         *   the requester.
         * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
         */
        if (!(*p_load_code) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
                DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
                return ECORE_BUSY;
        }

        return ECORE_SUCCESS;
}
497
498 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
499                                     struct ecore_ptt *p_ptt)
500 {
501         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
502                                         PUBLIC_PATH);
503         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
504         u32 path_addr = SECTION_ADDR(mfw_path_offsize,
505                                      ECORE_PATH_ID(p_hwfn));
506         u32 disabled_vfs[VF_MAX_STATIC / 32];
507         int i;
508
509         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
510                    "Reading Disabled VF information from [offset %08x],"
511                    " path_addr %08x\n",
512                    mfw_path_offsize, path_addr);
513
514         for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
515                 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
516                                            path_addr +
517                                            OFFSETOF(struct public_path,
518                                                     mcp_vf_disabled) +
519                                            sizeof(u32) * i);
520                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
521                            "FLR-ed VFs [%08x,...,%08x] - %08x\n",
522                            i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
523         }
524
525         if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
526                 OSAL_VF_FLR_UPDATE(p_hwfn);
527 }
528
529 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
530                                           struct ecore_ptt *p_ptt,
531                                           u32 *vfs_to_ack)
532 {
533         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
534                                         PUBLIC_FUNC);
535         u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
536         u32 func_addr = SECTION_ADDR(mfw_func_offsize,
537                                      MCP_PF_ID(p_hwfn));
538         union drv_union_data union_data;
539         u32 resp, param;
540         enum _ecore_status_t rc;
541         int i;
542
543         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
544                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
545                            "Acking VFs [%08x,...,%08x] - %08x\n",
546                            i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
547
548         OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
549
550         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
551                                      DRV_MSG_CODE_VF_DISABLED_DONE, 0,
552                                      &union_data, &resp, &param);
553         if (rc != ECORE_SUCCESS) {
554                 DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
555                           "Failed to pass ACK for VF flr to MFW\n");
556                 return ECORE_TIMEOUT;
557         }
558
559         /* TMP - clear the ACK bits; should be done by MFW */
560         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
561                 ecore_wr(p_hwfn, p_ptt,
562                          func_addr +
563                          OFFSETOF(struct public_func, drv_ack_vf_disabled) +
564                          i * sizeof(u32), 0);
565
566         return rc;
567 }
568
569 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
570                                                 struct ecore_ptt *p_ptt)
571 {
572         u32 transceiver_state;
573
574         transceiver_state = ecore_rd(p_hwfn, p_ptt,
575                                      p_hwfn->mcp_info->port_addr +
576                                      OFFSETOF(struct public_port,
577                                               transceiver_data));
578
579         DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
580                    "Received transceiver state update [0x%08x] from mfw"
581                    " [Addr 0x%x]\n",
582                    transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
583                                             OFFSETOF(struct public_port,
584                                                      transceiver_data)));
585
586         transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE);
587
588         if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
589                 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
590         else
591                 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
592 }
593
/* Refresh the cached link state (mcp_info->link_output) from the MFW's
 * link_status word: link-up, speed/duplex, bandwidth-corrected speed,
 * autoneg/PFC flags and link-partner advertisement. With b_reset, only
 * clears the cached state ("link down") and returns.
 */
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt, bool b_reset)
{
        struct ecore_mcp_link_state *p_link;
        u32 status = 0;

        /* Start from a clean state on every update */
        p_link = &p_hwfn->mcp_info->link_output;
        OSAL_MEMSET(p_link, 0, sizeof(*p_link));
        if (!b_reset) {
                status = ecore_rd(p_hwfn, p_ptt,
                                  p_hwfn->mcp_info->port_addr +
                                  OFFSETOF(struct public_port, link_status));
                DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
                           "Received link update [0x%08x] from mfw"
                           " [Addr 0x%x]\n",
                           status, (u32)(p_hwfn->mcp_info->port_addr +
                                          OFFSETOF(struct public_port,
                                                   link_status)));
        } else {
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Resetting link indications\n");
                return;
        }

        /* Only report link-up after the driver configured the link */
        if (p_hwfn->b_drv_link_init)
                p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
        else
                p_link->link_up = false;

        p_link->full_duplex = true;
        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
                p_link->speed = 100000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
                p_link->speed = 50000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
                p_link->speed = 40000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
                p_link->speed = 25000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
                p_link->speed = 20000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
                p_link->speed = 10000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
                p_link->full_duplex = false;
                /* Fall-through */
        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
                p_link->speed = 1000;
                break;
        default:
                p_link->speed = 0;
        }

        /* Cache the raw line speed separately, since p_link->speed is
         * adjusted below according to the bandwidth allocation.
         */
        if (p_link->link_up && p_link->speed)
                p_link->line_speed = p_link->speed;
        else
                p_link->line_speed = 0;

        /* Correct speed according to bandwidth allocation */
        if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
                u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;

                __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
                                                   p_link, max_bw);
        }

        if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) {
                u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

                __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                                   p_link, min_bw);

                /* Propagate the new minimal rate to the vport WFQ config */
                ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
                                                      p_link->min_pf_rate);
        }

        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
        p_link->parallel_detection = !!(status &
                                         LINK_STATUS_PARALLEL_DETECTION_USED);
        p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

        /* Collect the link-partner advertised speeds into a bitmask */
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_10G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_20G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_25G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_40G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_50G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_100G : 0;

        p_link->partner_tx_flow_ctrl_en =
            !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
        p_link->partner_rx_flow_ctrl_en =
            !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

        switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
        case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
                break;
        default:
                p_link->partner_adv_pause = 0;
        }

        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

        if (p_link->link_up)
                ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);

        /* Notify the OS abstraction layer of the new link state */
        OSAL_LINK_UPDATE(p_hwfn);
}
736
/* Configure the link via the MFW: build a pmm_phy_cfg from the cached
 * link_input parameters and send INIT_PHY (b_up) or LINK_RESET (!b_up).
 *
 * @param b_up - true to bring the link up, false to reset it
 *
 * @return ECORE_SUCCESS, or the mailbox error when the MFW did not respond.
 */
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt, bool b_up)
{
        struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        union drv_union_data union_data;
        struct pmm_phy_cfg *p_phy_cfg;
        u32 param = 0, reply = 0, cmd;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        /* No MFW on emulation - nothing to configure */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                return ECORE_SUCCESS;
#endif

        /* Set the shmem configuration according to params */
        p_phy_cfg = &union_data.drv_phy_cfg;
        OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        /* Forced speed is only relevant when autoneg is off */
        if (!params->speed.autoneg)
                p_phy_cfg->speed = params->speed.forced_speed;
        p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
        p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
        p_phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
        p_phy_cfg->adv_speed = params->speed.advertised_speeds;
        p_phy_cfg->loopback_mode = params->loopback_mode;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
                DP_INFO(p_hwfn,
                        "Link on FPGA - Ask for loopback mode '5' at 10G\n");
                p_phy_cfg->loopback_mode = 5;
                p_phy_cfg->speed = 10000;
        }
#endif

        /* Remember the requested direction for link-change handling */
        p_hwfn->b_drv_link_init = b_up;

        if (b_up)
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
                           " adv_speed 0x%08x, loopback 0x%08x,"
                           " features 0x%08x\n",
                           p_phy_cfg->speed, p_phy_cfg->pause,
                           p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
                           p_phy_cfg->feature_config_flags);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, 0, &union_data, &reply,
                                     &param);

        /* if mcp fails to respond we must abort */
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* Reset the link status if needed */
        if (!b_up)
                ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);

        return rc;
}
800
/* Read the process-kill counter of this hwfn's path from the PUBLIC_PATH
 * shmem section.
 *
 * NOTE(review): for a VF this returns ECORE_INVAL even though the return
 * type is u32, so callers cannot distinguish the error value from a real
 * counter value - confirm whether VF callers exist.
 */
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	/* Only the counter bits of the process_kill field are relevant */
	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
	    PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}
822
/* Handle a process-kill (error recovery) indication from the MFW: disable
 * interrupts, mark a recovery as being in progress and schedule the OS
 * recovery handler. The device-wide steps run only once per device (on the
 * leading hwfn in CMT mode).
 */
static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery"
			  " process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	/* Defer the actual recovery to the OS abstraction layer */
	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}
856
/* Collect protocol statistics from the OS layer and send them to the MFW in
 * response to a stats request message. Only LAN statistics are implemented
 * here; any other request type is rejected with a notice (callers do pass
 * FCOE/ISCSI/RDMA types - see ecore_mcp_handle_events).
 */
static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	u32 hsi_param, param = 0, reply = 0;
	union drv_union_data union_data;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
		return;
	}

	/* The OS layer fills the stats according to the requested type */
	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEMCPY(&union_data, &stats, sizeof(stats));

	ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS,
				hsi_param, &union_data, &reply, &param);
}
883
884 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
885                                     struct ecore_ptt *p_ptt,
886                                     struct public_func *p_data, int pfid)
887 {
888         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
889                                         PUBLIC_FUNC);
890         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
891         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
892         u32 i, size;
893
894         OSAL_MEM_ZERO(p_data, sizeof(*p_data));
895
896         size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize));
897         for (i = 0; i < size / sizeof(u32); i++)
898                 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
899                                               func_addr + (i << 2));
900
901         return size;
902 }
903
904 static void
905 ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
906                         struct public_func *p_shmem_info)
907 {
908         struct ecore_mcp_function_info *p_info;
909
910         p_info = &p_hwfn->mcp_info->func_info;
911
912         /* TODO - bandwidth min/max should have valid values of 1-100,
913          * as well as some indication that the feature is disabled.
914          * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
915          * limit and correct value to min `1' and max `100' if limit isn't in
916          * range.
917          */
918         p_info->bandwidth_min = (p_shmem_info->config &
919                                  FUNC_MF_CFG_MIN_BW_MASK) >>
920             FUNC_MF_CFG_MIN_BW_SHIFT;
921         if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
922                 DP_INFO(p_hwfn,
923                         "bandwidth minimum out of bounds [%02x]. Set to 1\n",
924                         p_info->bandwidth_min);
925                 p_info->bandwidth_min = 1;
926         }
927
928         p_info->bandwidth_max = (p_shmem_info->config &
929                                  FUNC_MF_CFG_MAX_BW_MASK) >>
930             FUNC_MF_CFG_MAX_BW_SHIFT;
931         if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
932                 DP_INFO(p_hwfn,
933                         "bandwidth maximum out of bounds [%02x]. Set to 100\n",
934                         p_info->bandwidth_max);
935                 p_info->bandwidth_max = 100;
936         }
937 }
938
/* Handle an MFW bandwidth-update message: re-read the min/max bandwidth
 * configuration from shmem, apply it via ecore_configure_pf_min/max_bandwidth
 * and acknowledge the update to the MFW.
 */
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	/* Updates mcp_info->func_info with the clamped min/max values */
	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}
960
961 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
962                                          struct ecore_ptt *p_ptt)
963 {
964         /* A single notification should be sent to upper driver in CMT mode */
965         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
966                 return;
967
968         DP_NOTICE(p_hwfn, false,
969                   "Fan failure was detected on the network interface card"
970                   " and it's going to be shut down.\n");
971
972         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
973 }
974
/**
 * @brief Process pending MFW->driver messages: read the MFW mailbox,
 *        compare it against the shadow copy, dispatch a handler for every
 *        changed entry, ACK all messages back to the MFW and refresh the
 *        shadow.
 *
 * @return ECORE_SUCCESS, or ECORE_INVAL if an unimplemented message was
 *         received or no new message was found despite the indication.
 */
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		/* The mailbox index identifies the message type */
		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			/* Only the LAN case is actually implemented there */
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		default:
			/* @DPDK */
			DP_NOTICE(p_hwfn, false,
				  "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		/* The ACK dwords are written right after the mailbox length
		 * dword and the current-message dwords.
		 */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
1067
1068 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
1069                                            struct ecore_ptt *p_ptt,
1070                                            u32 *p_mfw_ver,
1071                                            u32 *p_running_bundle_id)
1072 {
1073         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1074         u32 global_offsize;
1075
1076 #ifndef ASIC_ONLY
1077         if (CHIP_REV_IS_EMUL(p_dev)) {
1078                 DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n");
1079                 return ECORE_SUCCESS;
1080         }
1081 #endif
1082
1083         if (IS_VF(p_dev)) {
1084                 if (p_hwfn->vf_iov_info) {
1085                         struct pfvf_acquire_resp_tlv *p_resp;
1086
1087                         p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1088                         *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1089                         return ECORE_SUCCESS;
1090                 }
1091
1092                 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
1093                            "VF requested MFW vers prior to ACQUIRE\n");
1094                         return ECORE_INVAL;
1095                 }
1096
1097         global_offsize = ecore_rd(p_hwfn, p_ptt,
1098                                   SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1099                                                        public_base,
1100                                                        PUBLIC_GLOBAL));
1101         *p_mfw_ver =
1102             ecore_rd(p_hwfn, p_ptt,
1103                      SECTION_ADDR(global_offsize,
1104                                   0) + OFFSETOF(struct public_global, mfw_ver));
1105
1106         if (p_running_bundle_id != OSAL_NULL) {
1107                 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1108                                                 SECTION_ADDR(global_offsize,
1109                                                              0) +
1110                                                 OFFSETOF(struct public_global,
1111                                                          running_bundle_id));
1112         }
1113
1114         return ECORE_SUCCESS;
1115 }
1116
/* Read the port's media type from the public_port shmem section.
 *
 * Returns ECORE_INVAL for a VF, ECORE_BUSY if the MFW isn't initialized
 * (*p_media_type untouched in that case) or if no PTT could be acquired
 * (*p_media_type left as MEDIA_UNSPECIFIED), ECORE_SUCCESS otherwise.
 */
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
					      u32 *p_media_type)
{
	struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
	struct ecore_ptt *p_ptt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
		return ECORE_BUSY;
	}

	/* Default the output before attempting the read */
	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	*p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
				 OFFSETOF(struct public_port, media_type));

	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}
1145
1146 static enum _ecore_status_t
1147 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1148                           struct public_func *p_info,
1149                           enum ecore_pci_personality *p_proto)
1150 {
1151         enum _ecore_status_t rc = ECORE_SUCCESS;
1152
1153         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1154         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1155                 *p_proto = ECORE_PCI_ETH;
1156                 break;
1157         default:
1158                 rc = ECORE_INVAL;
1159         }
1160
1161         return rc;
1162 }
1163
/**
 * @brief Populate mcp_info->func_info from the function's shmem section:
 *        pause-on-host flag, personality, bandwidth limits, MAC and ovlan.
 *
 * @return ECORE_SUCCESS, or ECORE_INVAL for an unknown personality.
 */
enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *info;
	struct public_func shmem_info;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return ECORE_INVAL;
	}

	/* Clamps min/max bandwidth into 1-100 and stores them in info */
	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		/* Unpack the MAC from the two shmem words, most significant
		 * byte first (mac_upper holds bytes 0-1, mac_lower 2-5).
		 */
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		/* TODO - are there protocols for which there's no MAC? */
		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
	}

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	/* NOTE(review): wwn_port/wwn_node are printed but not assigned here -
	 * presumably set elsewhere; confirm.
	 */
	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x"
		    " protocol %02x BW [%02x - %02x]"
		    " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %" PRIx64
		    " node %" PRIx64 " ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node, info->ovlan);

	return ECORE_SUCCESS;
}
1211
1212 struct ecore_mcp_link_params
1213 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1214 {
1215         if (!p_hwfn || !p_hwfn->mcp_info)
1216                 return OSAL_NULL;
1217         return &p_hwfn->mcp_info->link_input;
1218 }
1219
1220 struct ecore_mcp_link_state
1221 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1222 {
1223         if (!p_hwfn || !p_hwfn->mcp_info)
1224                 return OSAL_NULL;
1225
1226 #ifndef ASIC_ONLY
1227         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1228                 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1229                 p_hwfn->mcp_info->link_output.link_up = true;
1230         }
1231 #endif
1232
1233         return &p_hwfn->mcp_info->link_output;
1234 }
1235
1236 struct ecore_mcp_link_capabilities
1237 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1238 {
1239         if (!p_hwfn || !p_hwfn->mcp_info)
1240                 return OSAL_NULL;
1241         return &p_hwfn->mcp_info->link_capabilities;
1242 }
1243
/* Request the MFW to drain the NIG, then wait 120ms for the drain to
 * complete before returning the mailbox status.
 *
 * NOTE(review): the '100' command parameter and the 120ms wait are magic
 * values - presumably MFW-defined; confirm against the MFW interface spec.
 */
enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 100, &resp, &param);

	/* Wait for the drain to complete before returning */
	OSAL_MSLEEP(120);

	return rc;
}
1258
1259 const struct ecore_mcp_function_info
1260 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1261 {
1262         if (!p_hwfn || !p_hwfn->mcp_info)
1263                 return OSAL_NULL;
1264         return &p_hwfn->mcp_info->func_info;
1265 }
1266
/* Dispatch an NVM request according to params->type: a read, a write or a
 * plain command, forwarding the response and param fields back through
 * *params. Returns ECORE_NOTIMPL for an unknown request type.
 */
enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   struct ecore_mcp_nvm_params *params)
{
	enum _ecore_status_t rc;

	switch (params->type) {
	case ECORE_MCP_NVM_RD:
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
					  params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
					  params->nvm_rd.buf_size,
					  params->nvm_rd.buf);
		break;
	case ECORE_MCP_CMD:
		rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
				   params->nvm_common.offset,
				   &params->nvm_common.resp,
				   &params->nvm_common.param);
		break;
	case ECORE_MCP_NVM_WR:
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
					  params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
					  params->nvm_wr.buf_size,
					  params->nvm_wr.buf);
		break;
	default:
		rc = ECORE_NOTIMPL;
		break;
	}
	return rc;
}
1302
1303 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1304                                   struct ecore_ptt *p_ptt, u32 personalities)
1305 {
1306         enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1307         struct public_func shmem_info;
1308         int i, count = 0, num_pfs;
1309
1310         num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1311
1312         for (i = 0; i < num_pfs; i++) {
1313                 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1314                                          MCP_PF_ID_BY_REL(p_hwfn, i));
1315                 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1316                         continue;
1317
1318                 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
1319                                               &protocol) != ECORE_SUCCESS)
1320                         continue;
1321
1322                 if ((1 << ((u32)protocol)) & personalities)
1323                         count++;
1324         }
1325
1326         return count;
1327 }
1328
/* Read the NVM flash size, in bytes, from the MCP NVM_CFG4 register.
 * Returns ECORE_INVAL for VFs and on emulation.
 */
enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_flash_size)
{
	u32 flash_size;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
		return ECORE_INVAL;
	}
#endif

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	/* The register field holds log2 of the flash size in Mbit; shifting
	 * by MCP_BYTES_PER_MBIT_SHIFT (17) converts it to a size in bytes
	 * (2^17 bytes == 1 Mbit).
	 */
	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
	    MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return ECORE_SUCCESS;
}
1354
1355 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1356                                                   struct ecore_ptt *p_ptt)
1357 {
1358         struct ecore_dev *p_dev = p_hwfn->p_dev;
1359
1360         if (p_dev->recov_in_prog) {
1361                 DP_NOTICE(p_hwfn, false,
1362                           "Avoid triggering a recovery since such a process"
1363                           " is already in progress\n");
1364                 return ECORE_AGAIN;
1365         }
1366
1367         DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
1368         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1369
1370         return ECORE_SUCCESS;
1371 }
1372
1373 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1374                                               struct ecore_ptt *p_ptt,
1375                                               u8 vf_id, u8 num)
1376 {
1377         u32 resp = 0, param = 0, rc_param = 0;
1378         enum _ecore_status_t rc;
1379
1380         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1381             DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1382         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1383             DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1384
1385         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1386                            &resp, &rc_param);
1387
1388         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1389                 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
1390                           vf_id);
1391                 rc = ECORE_INVAL;
1392         }
1393
1394         return rc;
1395 }
1396
/**
 * @brief Send the driver version (numeric version plus name string) to the
 *        MFW via the SET_VERSION mailbox command.
 *
 * NOTE(review): only MCP_DRV_VER_STR_SIZE - 4 bytes of p_ver->name are
 * copied, dword by dword with a byte swap to BE - presumably the last dword
 * is reserved for NUL termination by the MFW; confirm against mcp_public.h.
 *
 * @return ECORE_SUCCESS, or the mailbox error if the MCP fails to respond.
 */
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	u32 param = 0, reply = 0, num_words, i;
	struct drv_version_stc *p_drv_version;
	union drv_union_data union_data;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	/* Copy the name into the union dword by dword, byte-swapped to BE */
	for (i = 0; i < num_words; i++) {
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0,
				     &union_data, &reply, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
1429
1430 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
1431                                     struct ecore_ptt *p_ptt)
1432 {
1433         enum _ecore_status_t rc;
1434         u32 resp = 0, param = 0;
1435
1436         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
1437                            &param);
1438         if (rc != ECORE_SUCCESS)
1439                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1440
1441         return rc;
1442 }
1443
/* Resume the MCP after a halt: clear the CPU state register, clear the
 * soft-halt bit in the CPU mode register and read it back to verify the
 * bit is actually cleared.
 *
 * NOTE(review): returns a raw -1 on failure rather than a named
 * _ecore_status_t value - confirm callers treat any non-zero as error.
 */
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 value, cpu_mode;

	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
}
1458
1459 enum _ecore_status_t
1460 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
1461                                    struct ecore_ptt *p_ptt,
1462                                    enum ecore_ov_config_method config,
1463                                    enum ecore_ov_client client)
1464 {
1465         enum _ecore_status_t rc;
1466         u32 resp = 0, param = 0;
1467         u32 drv_mb_param;
1468
1469         switch (config) {
1470         case ECORE_OV_CLIENT_DRV:
1471                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
1472                 break;
1473         case ECORE_OV_CLIENT_USER:
1474                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
1475                 break;
1476         default:
1477                 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config);
1478                 return ECORE_INVAL;
1479         }
1480
1481         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
1482                            drv_mb_param, &resp, &param);
1483         if (rc != ECORE_SUCCESS)
1484                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1485
1486         return rc;
1487 }
1488
1489 enum _ecore_status_t
1490 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
1491                                  struct ecore_ptt *p_ptt,
1492                                  enum ecore_ov_driver_state drv_state)
1493 {
1494         enum _ecore_status_t rc;
1495         u32 resp = 0, param = 0;
1496         u32 drv_mb_param;
1497
1498         switch (drv_state) {
1499         case ECORE_OV_DRIVER_STATE_NOT_LOADED:
1500                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
1501                 break;
1502         case ECORE_OV_DRIVER_STATE_DISABLED:
1503                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
1504                 break;
1505         case ECORE_OV_DRIVER_STATE_ACTIVE:
1506                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
1507                 break;
1508         default:
1509                 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
1510                 return ECORE_INVAL;
1511         }
1512
1513         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
1514                            drv_state, &resp, &param);
1515         if (rc != ECORE_SUCCESS)
1516                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1517
1518         return rc;
1519 }
1520
/* Stub: FC NPIV table retrieval is not implemented; always reports success
 * without touching p_table.
 */
enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return 0;
}
1527
/* Stub: MTU update notification to the MFW is not implemented; always
 * reports success.
 */
enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return 0;
}
1534
1535 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
1536                                        struct ecore_ptt *p_ptt,
1537                                        enum ecore_led_mode mode)
1538 {
1539         u32 resp = 0, param = 0, drv_mb_param;
1540         enum _ecore_status_t rc;
1541
1542         switch (mode) {
1543         case ECORE_LED_MODE_ON:
1544                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
1545                 break;
1546         case ECORE_LED_MODE_OFF:
1547                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
1548                 break;
1549         case ECORE_LED_MODE_RESTORE:
1550                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
1551                 break;
1552         default:
1553                 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
1554                 return ECORE_INVAL;
1555         }
1556
1557         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
1558                            drv_mb_param, &resp, &param);
1559         if (rc != ECORE_SUCCESS)
1560                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1561
1562         return rc;
1563 }
1564
1565 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
1566                                              struct ecore_ptt *p_ptt,
1567                                              u32 mask_parities)
1568 {
1569         enum _ecore_status_t rc;
1570         u32 resp = 0, param = 0;
1571
1572         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
1573                            mask_parities, &resp, &param);
1574
1575         if (rc != ECORE_SUCCESS) {
1576                 DP_ERR(p_hwfn,
1577                        "MCP response failure for mask parities, aborting\n");
1578         } else if (resp != FW_MSG_CODE_OK) {
1579                 DP_ERR(p_hwfn,
1580                        "MCP did not ack mask parity request. Old MFW?\n");
1581                 rc = ECORE_INVAL;
1582         }
1583
1584         return rc;
1585 }
1586
/* Read @len bytes from NVRAM starting at @addr into @p_buf, in chunks of
 * at most MCP_DRV_NVM_BUF_LEN bytes per mailbox transaction.
 * The last mailbox response is cached in p_dev->mcp_nvm_resp.
 * Returns ECORE_SUCCESS, ECORE_BUSY if no PTT is available, or the first
 * failing chunk's status.
 */
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	bytes_left = len;
	offset = 0;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		/* Chunk length is encoded in the upper bits of the offset */
		params.nvm_common.offset = (addr + offset) |
		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
					    FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
			break;
		}
		/* Advance by what the MFW actually returned, which may be
		 * less than bytes_to_copy.
		 */
		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
1627
1628 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
1629                                         u32 addr, u8 *p_buf, u32 len)
1630 {
1631         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1632         struct ecore_mcp_nvm_params params;
1633         struct ecore_ptt *p_ptt;
1634         enum _ecore_status_t rc;
1635
1636         p_ptt = ecore_ptt_acquire(p_hwfn);
1637         if (!p_ptt)
1638                 return ECORE_BUSY;
1639
1640         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1641         params.type = ECORE_MCP_NVM_RD;
1642         params.nvm_rd.buf_size = &len;
1643         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
1644             DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
1645         params.nvm_common.offset = addr;
1646         params.nvm_rd.buf = (u32 *)p_buf;
1647         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1648         if (rc != ECORE_SUCCESS)
1649                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1650
1651         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1652         ecore_ptt_release(p_hwfn, p_ptt);
1653
1654         return rc;
1655 }
1656
1657 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
1658 {
1659         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1660         struct ecore_mcp_nvm_params params;
1661         struct ecore_ptt *p_ptt;
1662
1663         p_ptt = ecore_ptt_acquire(p_hwfn);
1664         if (!p_ptt)
1665                 return ECORE_BUSY;
1666
1667         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1668         OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
1669         ecore_ptt_release(p_hwfn, p_ptt);
1670
1671         return ECORE_SUCCESS;
1672 }
1673
1674 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
1675 {
1676         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1677         struct ecore_mcp_nvm_params params;
1678         struct ecore_ptt *p_ptt;
1679         enum _ecore_status_t rc;
1680
1681         p_ptt = ecore_ptt_acquire(p_hwfn);
1682         if (!p_ptt)
1683                 return ECORE_BUSY;
1684         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1685         params.type = ECORE_MCP_CMD;
1686         params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
1687         params.nvm_common.offset = addr;
1688         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1689         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1690         ecore_ptt_release(p_hwfn, p_ptt);
1691
1692         return rc;
1693 }
1694
1695 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
1696                                                   u32 addr)
1697 {
1698         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1699         struct ecore_mcp_nvm_params params;
1700         struct ecore_ptt *p_ptt;
1701         enum _ecore_status_t rc;
1702
1703         p_ptt = ecore_ptt_acquire(p_hwfn);
1704         if (!p_ptt)
1705                 return ECORE_BUSY;
1706         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1707         params.type = ECORE_MCP_CMD;
1708         params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
1709         params.nvm_common.offset = addr;
1710         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1711         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1712         ecore_ptt_release(p_hwfn, p_ptt);
1713
1714         return rc;
1715 }
1716
1717 /* rc receives ECORE_INVAL as default parameter because
1718  * it might not enter the while loop if the len is 0
1719  */
1720 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
1721                                          u32 addr, u8 *p_buf, u32 len)
1722 {
1723         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1724         enum _ecore_status_t rc = ECORE_INVAL;
1725         struct ecore_mcp_nvm_params params;
1726         struct ecore_ptt *p_ptt;
1727         u32 buf_idx, buf_size;
1728
1729         p_ptt = ecore_ptt_acquire(p_hwfn);
1730         if (!p_ptt)
1731                 return ECORE_BUSY;
1732
1733         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1734         params.type = ECORE_MCP_NVM_WR;
1735         if (cmd == ECORE_PUT_FILE_DATA)
1736                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
1737         else
1738                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
1739         buf_idx = 0;
1740         while (buf_idx < len) {
1741                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
1742                                       MCP_DRV_NVM_BUF_LEN);
1743                 params.nvm_common.offset = ((buf_size <<
1744                                              DRV_MB_PARAM_NVM_LEN_SHIFT)
1745                                             | addr) + buf_idx;
1746                 params.nvm_wr.buf_size = buf_size;
1747                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
1748                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1749                 if (rc != ECORE_SUCCESS ||
1750                     ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
1751                      (params.nvm_common.resp !=
1752                       FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
1753                         DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1754
1755                 buf_idx += buf_size;
1756         }
1757
1758         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1759         ecore_ptt_release(p_hwfn, p_ptt);
1760
1761         return rc;
1762 }
1763
1764 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
1765                                          u32 addr, u8 *p_buf, u32 len)
1766 {
1767         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1768         struct ecore_mcp_nvm_params params;
1769         struct ecore_ptt *p_ptt;
1770         enum _ecore_status_t rc;
1771
1772         p_ptt = ecore_ptt_acquire(p_hwfn);
1773         if (!p_ptt)
1774                 return ECORE_BUSY;
1775
1776         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1777         params.type = ECORE_MCP_NVM_WR;
1778         params.nvm_wr.buf_size = len;
1779         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
1780             DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
1781         params.nvm_common.offset = addr;
1782         params.nvm_wr.buf = (u32 *)p_buf;
1783         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1784         if (rc != ECORE_SUCCESS)
1785                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1786         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1787         ecore_ptt_release(p_hwfn, p_ptt);
1788
1789         return rc;
1790 }
1791
1792 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
1793                                                    u32 addr)
1794 {
1795         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1796         struct ecore_mcp_nvm_params params;
1797         struct ecore_ptt *p_ptt;
1798         enum _ecore_status_t rc;
1799
1800         p_ptt = ecore_ptt_acquire(p_hwfn);
1801         if (!p_ptt)
1802                 return ECORE_BUSY;
1803
1804         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1805         params.type = ECORE_MCP_CMD;
1806         params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
1807         params.nvm_common.offset = addr;
1808         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1809         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1810         ecore_ptt_release(p_hwfn, p_ptt);
1811
1812         return rc;
1813 }
1814
1815 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
1816                                             struct ecore_ptt *p_ptt,
1817                                             u32 port, u32 addr, u32 offset,
1818                                             u32 len, u8 *p_buf)
1819 {
1820         struct ecore_mcp_nvm_params params;
1821         enum _ecore_status_t rc;
1822         u32 bytes_left, bytes_to_copy, buf_size;
1823
1824         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1825         SET_FIELD(params.nvm_common.offset,
1826                   DRV_MB_PARAM_TRANSCEIVER_PORT, port);
1827         SET_FIELD(params.nvm_common.offset,
1828                   DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
1829         addr = offset;
1830         offset = 0;
1831         bytes_left = len;
1832         params.type = ECORE_MCP_NVM_RD;
1833         params.nvm_rd.buf_size = &buf_size;
1834         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
1835         while (bytes_left > 0) {
1836                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
1837                                            MAX_I2C_TRANSACTION_SIZE);
1838                 params.nvm_rd.buf = (u32 *)(p_buf + offset);
1839                 SET_FIELD(params.nvm_common.offset,
1840                           DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset);
1841                 SET_FIELD(params.nvm_common.offset,
1842                           DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy);
1843                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1844                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
1845                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
1846                         return ECORE_NODEV;
1847                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
1848                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
1849                         return ECORE_UNKNOWN_ERROR;
1850
1851                 offset += *params.nvm_rd.buf_size;
1852                 bytes_left -= *params.nvm_rd.buf_size;
1853         }
1854
1855         return ECORE_SUCCESS;
1856 }
1857
1858 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
1859                                              struct ecore_ptt *p_ptt,
1860                                              u32 port, u32 addr, u32 offset,
1861                                              u32 len, u8 *p_buf)
1862 {
1863         struct ecore_mcp_nvm_params params;
1864         enum _ecore_status_t rc;
1865         u32 buf_idx, buf_size;
1866
1867         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1868         SET_FIELD(params.nvm_common.offset,
1869                   DRV_MB_PARAM_TRANSCEIVER_PORT, port);
1870         SET_FIELD(params.nvm_common.offset,
1871                   DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
1872         params.type = ECORE_MCP_NVM_WR;
1873         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
1874         buf_idx = 0;
1875         while (buf_idx < len) {
1876                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
1877                                       MAX_I2C_TRANSACTION_SIZE);
1878                 SET_FIELD(params.nvm_common.offset,
1879                           DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx);
1880                 SET_FIELD(params.nvm_common.offset,
1881                           DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size);
1882                 params.nvm_wr.buf_size = buf_size;
1883                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
1884                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1885                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
1886                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
1887                         return ECORE_NODEV;
1888                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
1889                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
1890                         return ECORE_UNKNOWN_ERROR;
1891
1892                 buf_idx += buf_size;
1893         }
1894
1895         return ECORE_SUCCESS;
1896 }
1897
1898 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
1899                                          struct ecore_ptt *p_ptt,
1900                                          u16 gpio, u32 *gpio_val)
1901 {
1902         enum _ecore_status_t rc = ECORE_SUCCESS;
1903         u32 drv_mb_param = 0, rsp;
1904
1905         SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
1906
1907         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
1908                            drv_mb_param, &rsp, gpio_val);
1909
1910         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
1911                 return ECORE_UNKNOWN_ERROR;
1912
1913         return ECORE_SUCCESS;
1914 }
1915
1916 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
1917                                           struct ecore_ptt *p_ptt,
1918                                           u16 gpio, u16 gpio_val)
1919 {
1920         enum _ecore_status_t rc = ECORE_SUCCESS;
1921         u32 drv_mb_param = 0, param, rsp;
1922
1923         SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
1924         SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val);
1925
1926         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
1927                            drv_mb_param, &rsp, &param);
1928
1929         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
1930                 return ECORE_UNKNOWN_ERROR;
1931
1932         return ECORE_SUCCESS;
1933 }