net/qede/base: add return code check
[dpdk.git] / drivers / net / qede / base / ecore_mcp.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_vf.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23
24 #define CHIP_MCP_RESP_ITER_US 10
25 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
26
27 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)   /* Account for 5 sec */
28 #define ECORE_MCP_RESET_RETRIES (50 * 1000)     /* Account for 500 msec */
29
30 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
31         ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
32                  _val)
33
34 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
35         ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
36
37 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
38         DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
39                      OFFSETOF(struct public_drv_mb, _field), _val)
40
41 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
42         DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
43                      OFFSETOF(struct public_drv_mb, _field))
44
45 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
46         DRV_ID_PDA_COMP_VER_SHIFT)
47
48 #define MCP_BYTES_PER_MBIT_SHIFT 17
49
50 #ifndef ASIC_ONLY
51 static int loaded;
52 static int loaded_port[MAX_NUM_PORTS] = { 0 };
53 #endif
54
55 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
56 {
57         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
58                 return false;
59         return true;
60 }
61
/* Cache the per-port public mailbox base address for this hwfn.
 * Reads the PUBLIC_PORT section offset/size dword from shared memory and
 * derives the address of the section instance matching MFW_PORT(p_hwfn).
 */
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
74
/* Copy the MFW mailbox from shared memory into mcp_info->mfw_mb_cur,
 * converting each dword from big-endian (shmem) to CPU byte order.
 * No-op if the MCP was never discovered (public_base == 0).
 */
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	/* TediBear emulation has no MFW mailbox to read */
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		/* '+ sizeof(u32)' skips the leading length dword of the
		 * mailbox section (mfw_mb_length lives at offset 0, see
		 * ecore_load_mcp_offsets()).
		 */
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}
98
99 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
100 {
101         if (p_hwfn->mcp_info) {
102                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
103                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
104                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
105         }
106         OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
107
108         return ECORE_SUCCESS;
109 }
110
/* Discover the MCP shared-memory layout and cache the driver/MFW mailbox
 * addresses, the current mailbox and pulse sequence numbers, and the MCP
 * history counter (used later to detect MCP resets).
 *
 * Returns ECORE_SUCCESS, or ECORE_INVAL when no MCP is present (emulation,
 * or a zero shared-memory address), in which case public_base stays 0.
 */
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
                p_info->public_base = 0;
                return ECORE_INVAL;
        }
#endif

        /* A zero shared-memory address means the MCP never initialized */
        p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return ECORE_INVAL;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
                   " mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address; its first dword holds the MB length */
        mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
                                               p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
            DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
            DRV_PULSE_SEQ_MASK;

        /* Snapshot the MCP history counter; a later mismatch indicates the
         * MCP was reset and the offsets must be re-read (ecore_do_mcp_cmd).
         */
        p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
                                          MISCS_REG_GENERIC_POR_0);

        return ECORE_SUCCESS;
}
165
166 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
167                                         struct ecore_ptt *p_ptt)
168 {
169         struct ecore_mcp_info *p_info;
170         u32 size;
171
172         /* Allocate mcp_info structure */
173         p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
174                                        sizeof(*p_hwfn->mcp_info));
175         if (!p_hwfn->mcp_info)
176                 goto err;
177         p_info = p_hwfn->mcp_info;
178
179         if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
180                 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
181                 /* Do not free mcp_info here, since public_base indicate that
182                  * the MCP is not initialized
183                  */
184                 return ECORE_SUCCESS;
185         }
186
187         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
188         p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189         p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
190         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
191                 goto err;
192
193         /* Initialize the MFW spinlock */
194         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
195         OSAL_SPIN_LOCK_INIT(&p_info->lock);
196
197         return ECORE_SUCCESS;
198
199 err:
200         DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
201         ecore_mcp_free(p_hwfn);
202         return ECORE_NOMEM;
203 }
204
/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
 * access is achieved by setting a blocking flag, which will fail other
 * competing contexts to send their mailboxes.
 *
 * Returns ECORE_SUCCESS with the lock held (or, for [UN]LOAD_REQ, with the
 * blocking flag set and the spinlock released), or ECORE_BUSY if a
 * [UN]LOAD_REQ is already in flight. Pair with ecore_mcp_mb_unlock().
 */
static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
                                              u32 cmd)
{
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

        /* The spinlock shouldn't be acquired when the mailbox command is
         * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
         * pending [UN]LOAD_REQ command of another PF together with a spinlock
         * (i.e. interrupts are disabled) - can lead to a deadlock.
         * It is assumed that for a single PF, no other mailbox commands can be
         * sent from another context while sending LOAD_REQ, and that any
         * parallel commands to UNLOAD_REQ can be cancelled.
         */
        if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
                p_hwfn->mcp_info->block_mb_sending = false;

        if (p_hwfn->mcp_info->block_mb_sending) {
                DP_NOTICE(p_hwfn, false,
                          "Trying to send a MFW mailbox command [0x%x]"
                          " in parallel to [UN]LOAD_REQ. Aborting.\n",
                          cmd);
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
                return ECORE_BUSY;
        }

        if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
                /* Exclusion for [UN]LOAD_REQ is enforced via the flag only;
                 * drop the spinlock (see the rationale above).
                 */
                p_hwfn->mcp_info->block_mb_sending = true;
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
        }

        return ECORE_SUCCESS;
}
244
245 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
246 {
247         if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
248                 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
249 }
250
/* Request an MCP reset and busy-wait until the MCP history counter
 * (MISCS_REG_GENERIC_POR_0) changes, which signals the reset completed.
 *
 * Returns ECORE_SUCCESS on completion, ECORE_AGAIN if the MFW did not
 * reset within the retry budget, or a lock-acquisition error.
 */
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
        u32 delay = CHIP_MCP_RESP_ITER_US;
        u32 org_mcp_reset_seq, cnt = 0;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
#endif

        /* Ensure that only a single thread is accessing the mailbox at a
         * certain time.
         */
        rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Set drv command along with the updated sequence */
        org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                /* Give the FW up to 500 second (50*1000*10usec) */
        } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
                                                MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < ECORE_MCP_RESET_RETRIES));

        /* A changed history counter means the MCP went through a reset */
        if (org_mcp_reset_seq !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = ECORE_AGAIN;
        }

        ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

        return rc;
}
296
297 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
298                                              struct ecore_ptt *p_ptt,
299                                              u32 cmd, u32 param,
300                                              u32 *o_mcp_resp,
301                                              u32 *o_mcp_param)
302 {
303         u32 delay = CHIP_MCP_RESP_ITER_US;
304         u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
305         u32 seq, cnt = 1, actual_mb_seq;
306         enum _ecore_status_t rc = ECORE_SUCCESS;
307
308 #ifndef ASIC_ONLY
309         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
310                 delay = EMUL_MCP_RESP_ITER_US;
311         /* There is a built-in delay of 100usec in each MFW response read */
312         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
313                 max_retries /= 10;
314 #endif
315
316         /* Get actual driver mailbox sequence */
317         actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
318             DRV_MSG_SEQ_NUMBER_MASK;
319
320         /* Use MCP history register to check if MCP reset occurred between
321          * init time and now.
322          */
323         if (p_hwfn->mcp_info->mcp_hist !=
324             ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
325                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
326                 ecore_load_mcp_offsets(p_hwfn, p_ptt);
327                 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
328         }
329         seq = ++p_hwfn->mcp_info->drv_mb_seq;
330
331         /* Set drv param */
332         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
333
334         /* Set drv command along with the updated sequence */
335         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
336
337         do {
338                 /* Wait for MFW response */
339                 OSAL_UDELAY(delay);
340                 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
341
342                 /* Give the FW up to 5 second (500*10ms) */
343         } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
344                  (cnt++ < max_retries));
345
346         /* Is this a reply to our command? */
347         if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
348                 *o_mcp_resp &= FW_MSG_CODE_MASK;
349                 /* Get the MCP param */
350                 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
351         } else {
352                 /* FW BUG! */
353                 DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
354                        cmd, param);
355                 *o_mcp_resp = 0;
356                 rc = ECORE_AGAIN;
357                 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
358         }
359         return rc;
360 }
361
/* Send a mailbox command together with an optional union_data payload.
 * p_data_src (if set) is copied into the shmem union before the command,
 * and p_data_dst (if set) is filled from shmem after the MFW responds,
 * regardless of the command's return code.
 *
 * Returns ECORE_BUSY if the MCP is uninitialized or blocked, otherwise the
 * result of ecore_do_mcp_cmd(); mcp_resp/mcp_param are returned inside
 * p_mb_params.
 */
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct ecore_mcp_mb_params *p_mb_params)
{
        u32 union_data_addr;
        enum _ecore_status_t rc;

        /* MCP not initialized */
        if (!ecore_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
                return ECORE_BUSY;
        }

        union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                          OFFSETOF(struct public_drv_mb, union_data);

        /* Ensure that only a single thread is accessing the mailbox at a
         * certain time.
         */
        rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (p_mb_params->p_data_src != OSAL_NULL)
                ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
                                p_mb_params->p_data_src,
                                sizeof(*p_mb_params->p_data_src));

        rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
                              p_mb_params->param, &p_mb_params->mcp_resp,
                              &p_mb_params->mcp_param);

        if (p_mb_params->p_data_dst != OSAL_NULL)
                ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
                                  union_data_addr,
                                  sizeof(*p_mb_params->p_data_dst));

        ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

        return rc;
}
404
/* Simple mailbox command without payload: send cmd/param, return the MFW
 * response code and parameter through o_mcp_resp/o_mcp_param.
 */
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
                                   u32 *o_mcp_resp, u32 *o_mcp_param)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

#ifndef ASIC_ONLY
        /* Emulation has no MFW; only maintain the load counters used by
         * ecore_mcp_mf_workaround(). Note: o_mcp_resp/o_mcp_param are left
         * untouched on this path.
         */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
                        loaded--;
                        loaded_port[p_hwfn->port_id]--;
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
                                   loaded);
                }
                return ECORE_SUCCESS;
        }
#endif

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return ECORE_SUCCESS;
}
436
437 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
438                                           struct ecore_ptt *p_ptt,
439                                           u32 cmd,
440                                           u32 param,
441                                           u32 *o_mcp_resp,
442                                           u32 *o_mcp_param,
443                                           u32 i_txn_size, u32 *i_buf)
444 {
445         struct ecore_mcp_mb_params mb_params;
446         union drv_union_data union_data;
447         enum _ecore_status_t rc;
448
449         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
450         mb_params.cmd = cmd;
451         mb_params.param = param;
452         OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
453         mb_params.p_data_src = &union_data;
454         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
455         if (rc != ECORE_SUCCESS)
456                 return rc;
457
458         *o_mcp_resp = mb_params.mcp_resp;
459         *o_mcp_param = mb_params.mcp_param;
460
461         return ECORE_SUCCESS;
462 }
463
464 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
465                                           struct ecore_ptt *p_ptt,
466                                           u32 cmd,
467                                           u32 param,
468                                           u32 *o_mcp_resp,
469                                           u32 *o_mcp_param,
470                                           u32 *o_txn_size, u32 *o_buf)
471 {
472         struct ecore_mcp_mb_params mb_params;
473         union drv_union_data union_data;
474         enum _ecore_status_t rc;
475
476         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
477         mb_params.cmd = cmd;
478         mb_params.param = param;
479         mb_params.p_data_dst = &union_data;
480         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
481         if (rc != ECORE_SUCCESS)
482                 return rc;
483
484         *o_mcp_resp = mb_params.mcp_resp;
485         *o_mcp_param = mb_params.mcp_param;
486
487         *o_txn_size = *o_mcp_param;
488         OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
489
490         return ECORE_SUCCESS;
491 }
492
493 #ifndef ASIC_ONLY
494 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
495                                     u32 *p_load_code)
496 {
497         static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
498
499         if (!loaded)
500                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
501         else if (!loaded_port[p_hwfn->port_id])
502                 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
503         else
504                 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
505
506         /* On CMT, always tell that it's engine */
507         if (p_hwfn->p_dev->num_hwfns > 1)
508                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
509
510         *p_load_code = load_phase;
511         loaded++;
512         loaded_port[p_hwfn->port_id]++;
513
514         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
515                    "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
516                    *p_load_code, loaded, p_hwfn->port_id,
517                    loaded_port[p_hwfn->port_id]);
518 }
519 #endif
520
521 static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
522 {
523         return (drv_role == DRV_ROLE_OS &&
524                 exist_drv_role == DRV_ROLE_PREBOOT) ||
525                (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
526 }
527
528 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
529                                                       struct ecore_ptt *p_ptt)
530 {
531         u32 resp = 0, param = 0;
532         enum _ecore_status_t rc;
533
534         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
535                            &resp, &param);
536         if (rc != ECORE_SUCCESS)
537                 DP_NOTICE(p_hwfn, false,
538                           "Failed to send cancel load request, rc = %d\n", rc);
539
540         return rc;
541 }
542
/* Bit positions of compiled-in protocol features, reported to the MFW as
 * drv_ver_1 in the load request (see ecore_mcp_load_req()).
 */
#define CONFIG_ECORE_L2_BITMAP_IDX      (0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX   (0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX    (0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX   (0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX    (0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX   (0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX     (0x1 << 6)

/* Build the bitmap of protocol features this ecore library was compiled
 * with, one bit per CONFIG_ECORE_* option.
 */
static u32 ecore_get_config_bitmap(void)
{
        u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
        config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
        config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
        config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
        config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
        config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
        config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
        config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

        return config_bitmap;
}
579
/* Input parameters for a LOAD_REQ mailbox exchange */
struct ecore_load_req_in_params {
        u8 hsi_ver;             /* HSI version to advertise to the MFW */
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT  0
#define ECORE_LOAD_REQ_HSI_VER_1        1
        u32 drv_ver_0;          /* driver version word 0 */
        u32 drv_ver_1;          /* driver version word 1 (config bitmap) */
        u32 fw_ver;             /* storm FW version */
        u8 drv_role;            /* DRV_ROLE_* value */
        u8 timeout_val;         /* LOAD_REQ_LOCK_TO field value */
        u8 force_cmd;           /* LOAD_REQ_FORCE_* value */
        bool avoid_eng_reset;   /* set LOAD_REQ_FLAGS0 to avoid engine reset */
};

/* Parsed fields of the MFW's LOAD_REQ response */
struct ecore_load_req_out_params {
        u32 load_code;          /* FW_MSG_CODE_DRV_LOAD_* response */
        u32 exist_drv_ver_0;    /* version of the already-loaded driver */
        u32 exist_drv_ver_1;
        u32 exist_fw_ver;       /* FW version of the already-loaded driver */
        u8 exist_drv_role;      /* role of the already-loaded driver */
        u8 mfw_hsi_ver;         /* HSI version supported by the MFW */
        bool drv_exists;        /* another driver instance already exists */
};
602
/* Send a single LOAD_REQ to the MFW and parse its response.
 *
 * The request payload (load_req_stc) is only meaningful for HSI versions
 * other than 1; likewise the response payload (load_rsp_stc) is parsed only
 * when the MFW did not refuse with FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1.
 * The raw response code is always returned in p_out_params->load_code.
 */
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                     struct ecore_load_req_in_params *p_in_params,
                     struct ecore_load_req_out_params *p_out_params)
{
        union drv_union_data union_data_src, union_data_dst;
        struct ecore_mcp_mb_params mb_params;
        struct load_req_stc *p_load_req;
        struct load_rsp_stc *p_load_rsp;
        u32 hsi_ver;
        enum _ecore_status_t rc;

        /* Build the request payload from the input parameters */
        p_load_req = &union_data_src.load_req;
        OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
        p_load_req->drv_ver_0 = p_in_params->drv_ver_0;
        p_load_req->drv_ver_1 = p_in_params->drv_ver_1;
        p_load_req->fw_ver = p_in_params->fw_ver;
        ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_ROLE,
                            p_in_params->drv_role);
        ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_LOCK_TO,
                            p_in_params->timeout_val);
        ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FORCE,
                            p_in_params->force_cmd);
        ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FLAGS0,
                            p_in_params->avoid_eng_reset);

        /* DEFAULT means "whatever HSI this driver was built for" */
        hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
                  DRV_ID_MCP_HSI_VER_CURRENT :
                  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
        mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
        mb_params.p_data_src = &union_data_src;
        mb_params.p_data_dst = &union_data_dst;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
                   mb_params.param,
                   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
                   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
                   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
                   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

        if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
                           p_load_req->drv_ver_0, p_load_req->drv_ver_1,
                           p_load_req->fw_ver, p_load_req->misc0,
                           ECORE_MFW_GET_FIELD(p_load_req->misc0,
                                               LOAD_REQ_ROLE),
                           ECORE_MFW_GET_FIELD(p_load_req->misc0,
                                               LOAD_REQ_LOCK_TO),
                           ECORE_MFW_GET_FIELD(p_load_req->misc0,
                                               LOAD_REQ_FORCE),
                           ECORE_MFW_GET_FIELD(p_load_req->misc0,
                                               LOAD_REQ_FLAGS0));

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to send load request, rc = %d\n", rc);
                return rc;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
        p_out_params->load_code = mb_params.mcp_resp;

        /* Parse the response payload unless the MFW rejected our HSI */
        if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
            p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
                p_load_rsp = &union_data_dst.load_rsp;
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
                           p_load_rsp->drv_ver_0, p_load_rsp->drv_ver_1,
                           p_load_rsp->fw_ver, p_load_rsp->misc0,
                           ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
                                               LOAD_RSP_ROLE),
                           ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
                                               LOAD_RSP_HSI),
                           ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
                                               LOAD_RSP_FLAGS0));

                p_out_params->exist_drv_ver_0 = p_load_rsp->drv_ver_0;
                p_out_params->exist_drv_ver_1 = p_load_rsp->drv_ver_1;
                p_out_params->exist_fw_ver = p_load_rsp->fw_ver;
                p_out_params->exist_drv_role =
                        ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_ROLE);
                p_out_params->mfw_hsi_ver =
                        ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_HSI);
                p_out_params->drv_exists =
                        ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
                                            LOAD_RSP_FLAGS0) &
                        LOAD_RSP_FLAGS0_DRV_EXISTS;
        }

        return ECORE_SUCCESS;
}
701
/* Translate an ecore driver role to the MFW's DRV_ROLE_* encoding.
 * Returns ECORE_INVAL for unknown roles.
 *
 * NOTE(review): the function name misspells "ecore" as "eocre"; it is called
 * by ecore_mcp_load_req() under this name, so renaming requires updating the
 * caller in the same change.
 */
static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
                                                   enum ecore_drv_role drv_role,
                                                   u8 *p_mfw_drv_role)
{
        switch (drv_role) {
        case ECORE_DRV_ROLE_OS:
                *p_mfw_drv_role = DRV_ROLE_OS;
                break;
        case ECORE_DRV_ROLE_KDUMP:
                *p_mfw_drv_role = DRV_ROLE_KDUMP;
                break;
        default:
                DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}
720
/* Force-load policy requested by the driver; values map 1:1 to the MFW's
 * LOAD_REQ_FORCE_* constants via ecore_get_mfw_force_cmd().
 */
enum ecore_load_req_force {
        ECORE_LOAD_REQ_FORCE_NONE,
        ECORE_LOAD_REQ_FORCE_PF,
        ECORE_LOAD_REQ_FORCE_ALL,
};
726
727 static enum _ecore_status_t
728 ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
729                         enum ecore_load_req_force force_cmd,
730                         u8 *p_mfw_force_cmd)
731 {
732         switch (force_cmd) {
733         case ECORE_LOAD_REQ_FORCE_NONE:
734                 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
735                 break;
736         case ECORE_LOAD_REQ_FORCE_PF:
737                 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
738                 break;
739         case ECORE_LOAD_REQ_FORCE_ALL:
740                 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
741                 break;
742         default:
743                 DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
744                 return ECORE_INVAL;
745         }
746
747         return ECORE_SUCCESS;
748 }
749
/* Send a LOAD_REQ to the MFW and negotiate the load response.
 *
 * Flow:
 *   1. Build the request from @p_params (role, timeout, default HSI version,
 *      fw/driver versions) with force mode NONE and send it.
 *   2. If the MFW refuses because it only supports HSI version 1, resend the
 *      same request with hsi_ver downgraded to 1.
 *   3. If the MFW answers that a force load is required, resend with
 *      FORCE_ALL when the existing driver's role allows it; otherwise cancel
 *      the pending request and return ECORE_BUSY.
 *   4. Validate the final response and report it via p_params->load_code.
 *
 * @param p_hwfn - HW function context
 * @param p_ptt - PTT window for register access
 * @param p_params - in: drv_role/timeout_val/avoid_eng_reset;
 *                   out: load_code (FW_MSG_CODE_DRV_LOAD_*)
 *
 * @return ECORE_SUCCESS on an accepted load request; ECORE_BUSY when the MFW
 *         refuses or a needed force load is avoided; ECORE_INVAL on an
 *         unexpected state; other codes propagated from the mailbox helpers.
 */
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        struct ecore_load_req_params *p_params)
{
        struct ecore_load_req_out_params out_params;
        struct ecore_load_req_in_params in_params;
        u8 mfw_drv_role, mfw_force_cmd;
        enum _ecore_status_t rc;

#ifndef ASIC_ONLY
        /* Emulation bypasses the MFW negotiation entirely */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
                return ECORE_SUCCESS;
        }
#endif

        OSAL_MEM_ZERO(&in_params, sizeof(in_params));
        in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
        in_params.drv_ver_0 = ECORE_VERSION;
        in_params.drv_ver_1 = ecore_get_config_bitmap();
        in_params.fw_ver = STORM_FW_VERSION;
        /* NOTE(review): the helper's name is misspelled ("eocre_"); it
         * presumably matches the out-of-view definition elsewhere in this
         * file — a rename should be done as a separate tree-wide change.
         */
        rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
        if (rc != ECORE_SUCCESS)
                return rc;

        in_params.drv_role = mfw_drv_role;
        in_params.timeout_val = p_params->timeout_val;
        /* First attempt never forces out an existing driver */
        rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
                                     &mfw_force_cmd);
        if (rc != ECORE_SUCCESS)
                return rc;

        in_params.force_cmd = mfw_force_cmd;
        in_params.avoid_eng_reset = p_params->avoid_eng_reset;

        OSAL_MEM_ZERO(&out_params, sizeof(out_params));
        rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* First handle cases where another load request should/might be sent:
         * - MFW expects the old interface [HSI version = 1]
         * - MFW responds that a force load request is required
         */
        if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
                DP_INFO(p_hwfn,
                        "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

                /* The previous load request set the mailbox blocking */
                p_hwfn->mcp_info->block_mb_sending = false;

                in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
                OSAL_MEM_ZERO(&out_params, sizeof(out_params));
                rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
                                          &out_params);
                if (rc != ECORE_SUCCESS)
                        return rc;
        } else if (out_params.load_code ==
                   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
                /* The previous load request set the mailbox blocking */
                p_hwfn->mcp_info->block_mb_sending = false;

                if (ecore_mcp_can_force_load(in_params.drv_role,
                                             out_params.exist_drv_role)) {
                        DP_INFO(p_hwfn,
                                "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
                                out_params.exist_drv_role,
                                out_params.exist_fw_ver,
                                out_params.exist_drv_ver_0,
                                out_params.exist_drv_ver_1);

                        rc = ecore_get_mfw_force_cmd(p_hwfn,
                                                     ECORE_LOAD_REQ_FORCE_ALL,
                                                     &mfw_force_cmd);
                        if (rc != ECORE_SUCCESS)
                                return rc;

                        in_params.force_cmd = mfw_force_cmd;
                        OSAL_MEM_ZERO(&out_params, sizeof(out_params));
                        rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
                                                  &out_params);
                        if (rc != ECORE_SUCCESS)
                                return rc;
                } else {
                        DP_NOTICE(p_hwfn, false,
                                  "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
                                  out_params.exist_drv_role,
                                  out_params.exist_fw_ver,
                                  out_params.exist_drv_ver_0,
                                  out_params.exist_drv_ver_1);

                        /* Release the request slot before giving up */
                        ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
                        return ECORE_BUSY;
                }
        }

        /* Now handle the other types of responses.
         * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
         * expected here after the additional revised load requests were sent.
         */
        switch (out_params.load_code) {
        case FW_MSG_CODE_DRV_LOAD_ENGINE:
        case FW_MSG_CODE_DRV_LOAD_PORT:
        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
                    out_params.drv_exists) {
                        /* The role and fw/driver version match, but the PF is
                         * already loaded and has not been unloaded gracefully.
                         * This is unexpected since a quasi-FLR request was
                         * previously sent as part of ecore_hw_prepare().
                         */
                        DP_NOTICE(p_hwfn, false,
                                  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
                        return ECORE_INVAL;
                }
                break;
        case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
        case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
        case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
        case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
                DP_NOTICE(p_hwfn, false,
                          "MFW refused a load request [resp 0x%08x]. Aborting.\n",
                          out_params.load_code);
                return ECORE_BUSY;
        default:
                DP_NOTICE(p_hwfn, false,
                          "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
                          out_params.load_code);
                break;
        }

        p_params->load_code = out_params.load_code;

        return ECORE_SUCCESS;
}
885
886 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
887                                     struct ecore_ptt *p_ptt)
888 {
889         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
890                                         PUBLIC_PATH);
891         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
892         u32 path_addr = SECTION_ADDR(mfw_path_offsize,
893                                      ECORE_PATH_ID(p_hwfn));
894         u32 disabled_vfs[VF_MAX_STATIC / 32];
895         int i;
896
897         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
898                    "Reading Disabled VF information from [offset %08x],"
899                    " path_addr %08x\n",
900                    mfw_path_offsize, path_addr);
901
902         for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
903                 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
904                                            path_addr +
905                                            OFFSETOF(struct public_path,
906                                                     mcp_vf_disabled) +
907                                            sizeof(u32) * i);
908                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
909                            "FLR-ed VFs [%08x,...,%08x] - %08x\n",
910                            i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
911         }
912
913         if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
914                 OSAL_VF_FLR_UPDATE(p_hwfn);
915 }
916
917 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
918                                           struct ecore_ptt *p_ptt,
919                                           u32 *vfs_to_ack)
920 {
921         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
922                                         PUBLIC_FUNC);
923         u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
924         u32 func_addr = SECTION_ADDR(mfw_func_offsize,
925                                      MCP_PF_ID(p_hwfn));
926         struct ecore_mcp_mb_params mb_params;
927         union drv_union_data union_data;
928         enum _ecore_status_t rc;
929         int i;
930
931         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
932                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
933                            "Acking VFs [%08x,...,%08x] - %08x\n",
934                            i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
935
936         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
937         mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
938         OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
939         mb_params.p_data_src = &union_data;
940         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
941                                      &mb_params);
942         if (rc != ECORE_SUCCESS) {
943                 DP_NOTICE(p_hwfn, false,
944                           "Failed to pass ACK for VF flr to MFW\n");
945                 return ECORE_TIMEOUT;
946         }
947
948         /* TMP - clear the ACK bits; should be done by MFW */
949         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
950                 ecore_wr(p_hwfn, p_ptt,
951                          func_addr +
952                          OFFSETOF(struct public_func, drv_ack_vf_disabled) +
953                          i * sizeof(u32), 0);
954
955         return rc;
956 }
957
958 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
959                                                 struct ecore_ptt *p_ptt)
960 {
961         u32 transceiver_state;
962
963         transceiver_state = ecore_rd(p_hwfn, p_ptt,
964                                      p_hwfn->mcp_info->port_addr +
965                                      OFFSETOF(struct public_port,
966                                               transceiver_data));
967
968         DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
969                    "Received transceiver state update [0x%08x] from mfw"
970                    " [Addr 0x%x]\n",
971                    transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
972                                             OFFSETOF(struct public_port,
973                                                      transceiver_data)));
974
975         transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
976
977         if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
978                 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
979         else
980                 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
981 }
982
/* Process a link-status notification from the MFW and refresh the cached
 * link state in p_hwfn->mcp_info->link_output.
 *
 * @param p_hwfn - HW function context
 * @param p_ptt - PTT window for register access
 * @param b_reset - true to only clear the cached link indications and return
 *                  (no shmem read, no upper-layer notification)
 *
 * Side effects: re-applies the PF min/max bandwidth and the vport WFQ
 * configuration according to the reported line speed, then notifies the
 * upper layer via OSAL_LINK_UPDATE().
 */
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         bool b_reset)
{
        struct ecore_mcp_link_state *p_link;
        u8 max_bw, min_bw;
        u32 status = 0;

        /* The cached state is rebuilt from scratch on every notification */
        p_link = &p_hwfn->mcp_info->link_output;
        OSAL_MEMSET(p_link, 0, sizeof(*p_link));
        if (!b_reset) {
                status = ecore_rd(p_hwfn, p_ptt,
                                  p_hwfn->mcp_info->port_addr +
                                  OFFSETOF(struct public_port, link_status));
                DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
                           "Received link update [0x%08x] from mfw"
                           " [Addr 0x%x]\n",
                           status, (u32)(p_hwfn->mcp_info->port_addr +
                                          OFFSETOF(struct public_port,
                                                   link_status)));
        } else {
                /* Reset: the zeroed state above is the whole result */
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Resetting link indications\n");
                return;
        }

        /* Ignore a stale link-up bit if the driver never initialized links */
        if (p_hwfn->b_drv_link_init)
                p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
        else
                p_link->link_up = false;

        /* Decode speed; 1000THD is the only half-duplex encoding */
        p_link->full_duplex = true;
        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
                p_link->speed = 100000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
                p_link->speed = 50000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
                p_link->speed = 40000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
                p_link->speed = 25000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
                p_link->speed = 20000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
                p_link->speed = 10000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
                p_link->full_duplex = false;
                /* Fall-through */
        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
                p_link->speed = 1000;
                break;
        default:
                p_link->speed = 0;
        }

        /* We never store total line speed as p_link->speed is
         * again changes according to bandwidth allocation.
         */
        if (p_link->link_up && p_link->speed)
                p_link->line_speed = p_link->speed;
        else
                p_link->line_speed = 0;

        max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
        min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

        /* Max bandwidth configuration */
        __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
                                           p_link, max_bw);

        /* Mintz bandwidth configuration */
        __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                           p_link, min_bw);
        ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
                                              p_link->min_pf_rate);

        /* Autoneg / flow-control indications straight from the status word */
        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
        p_link->parallel_detection = !!(status &
                                         LINK_STATUS_PARALLEL_DETECTION_USED);
        p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

        /* Collect the link partner's advertised speeds into a bitmap */
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_10G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_20G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_25G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_40G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_50G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_100G : 0;

        p_link->partner_tx_flow_ctrl_en =
            !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
        p_link->partner_rx_flow_ctrl_en =
            !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

        switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
        case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
                break;
        default:
                p_link->partner_adv_pause = 0;
        }

        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

        /* Let the upper layer react to the new link state */
        OSAL_LINK_UPDATE(p_hwfn);
}
1119
/* Push the driver's link configuration to the MFW and request link init
 * (b_up == true) or a link reset (b_up == false).
 *
 * @param p_hwfn - HW function context
 * @param p_ptt - PTT window for register access
 * @param b_up - true to bring the link up with the configured parameters,
 *               false to reset it
 *
 * @return ECORE_SUCCESS, or the mailbox error when the MCP fails to respond.
 */
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt, bool b_up)
{
        struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        struct ecore_mcp_mb_params mb_params;
        union drv_union_data union_data;
        struct eth_phy_cfg *p_phy_cfg;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 cmd;

#ifndef ASIC_ONLY
        /* Emulation has no real PHY to configure */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                return ECORE_SUCCESS;
#endif

        /* Set the shmem configuration according to params */
        /* NOTE(review): only the drv_phy_cfg member of union_data is zeroed
         * and filled; presumably the mailbox helper copies just that part —
         * confirm against ecore_mcp_cmd_and_union.
         */
        p_phy_cfg = &union_data.drv_phy_cfg;
        OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        /* A forced speed is only meaningful when autoneg is off */
        if (!params->speed.autoneg)
                p_phy_cfg->speed = params->speed.forced_speed;
        p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
        p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
        p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
        p_phy_cfg->adv_speed = params->speed.advertised_speeds;
        p_phy_cfg->loopback_mode = params->loopback_mode;
        /* Remember whether the driver initialized the link; read by
         * ecore_mcp_handle_link_change() when decoding link-up.
         */
        p_hwfn->b_drv_link_init = b_up;

        if (b_up)
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
                           " adv_speed 0x%08x, loopback 0x%08x\n",
                           p_phy_cfg->speed, p_phy_cfg->pause,
                           p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.p_data_src = &union_data;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

        /* if mcp fails to respond we must abort */
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* Reset the link status if needed */
        if (!b_up)
                ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);

        return rc;
}
1174
1175 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1176                                    struct ecore_ptt *p_ptt)
1177 {
1178         u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1179
1180         /* TODO - Add support for VFs */
1181         if (IS_VF(p_hwfn->p_dev))
1182                 return ECORE_INVAL;
1183
1184         path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1185                                                  PUBLIC_PATH);
1186         path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1187         path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1188
1189         proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1190                                  path_addr +
1191                                  OFFSETOF(struct public_path, process_kill)) &
1192             PROCESS_KILL_COUNTER_MASK;
1193
1194         return proc_kill_cnt;
1195 }
1196
/* Handle a process-kill indication from the MFW: mask interrupts, mark the
 * device as recovering, and schedule the recovery handler.
 *
 * Ordering matters: interrupts are disabled on every hwfn first; everything
 * past the leading-hwfn check runs only once per device in CMT mode.
 */
static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        u32 proc_kill_cnt;

        /* Prevent possible attentions/interrupts during the recovery handling
         * and till its load phase, during which they will be re-enabled.
         */
        ecore_int_igu_disable_int(p_hwfn, p_ptt);

        DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

        /* The following operations should be done once, and thus in CMT mode
         * are carried out by only the first HW function.
         */
        if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
                return;

        /* Don't restart a recovery that is already underway */
        if (p_dev->recov_in_prog) {
                DP_NOTICE(p_hwfn, false,
                          "Ignoring the indication since a recovery"
                          " process is already in progress\n");
                return;
        }

        p_dev->recov_in_prog = true;

        proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

        /* Actual recovery runs from a deferred context */
        OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}
1230
1231 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1232                                           struct ecore_ptt *p_ptt,
1233                                           enum MFW_DRV_MSG_TYPE type)
1234 {
1235         enum ecore_mcp_protocol_type stats_type;
1236         union ecore_mcp_protocol_stats stats;
1237         struct ecore_mcp_mb_params mb_params;
1238         union drv_union_data union_data;
1239         u32 hsi_param;
1240         enum _ecore_status_t rc;
1241
1242         switch (type) {
1243         case MFW_DRV_MSG_GET_LAN_STATS:
1244                 stats_type = ECORE_MCP_LAN_STATS;
1245                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1246                 break;
1247         default:
1248                 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
1249                 return;
1250         }
1251
1252         OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1253
1254         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1255         mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1256         mb_params.param = hsi_param;
1257         OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
1258         mb_params.p_data_src = &union_data;
1259         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1260         if (rc != ECORE_SUCCESS)
1261                 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1262 }
1263
1264 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1265                                     struct public_func *p_shmem_info)
1266 {
1267         struct ecore_mcp_function_info *p_info;
1268
1269         p_info = &p_hwfn->mcp_info->func_info;
1270
1271         /* TODO - bandwidth min/max should have valid values of 1-100,
1272          * as well as some indication that the feature is disabled.
1273          * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
1274          * limit and correct value to min `1' and max `100' if limit isn't in
1275          * range.
1276          */
1277         p_info->bandwidth_min = (p_shmem_info->config &
1278                                  FUNC_MF_CFG_MIN_BW_MASK) >>
1279             FUNC_MF_CFG_MIN_BW_SHIFT;
1280         if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1281                 DP_INFO(p_hwfn,
1282                         "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1283                         p_info->bandwidth_min);
1284                 p_info->bandwidth_min = 1;
1285         }
1286
1287         p_info->bandwidth_max = (p_shmem_info->config &
1288                                  FUNC_MF_CFG_MAX_BW_MASK) >>
1289             FUNC_MF_CFG_MAX_BW_SHIFT;
1290         if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1291                 DP_INFO(p_hwfn,
1292                         "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1293                         p_info->bandwidth_max);
1294                 p_info->bandwidth_max = 100;
1295         }
1296 }
1297
1298 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1299                                     struct ecore_ptt *p_ptt,
1300                                     struct public_func *p_data,
1301                                     int pfid)
1302 {
1303         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1304                                         PUBLIC_FUNC);
1305         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1306         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1307         u32 i, size;
1308
1309         OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1310
1311         size = OSAL_MIN_T(u32, sizeof(*p_data),
1312                           SECTION_SIZE(mfw_path_offsize));
1313         for (i = 0; i < size / sizeof(u32); i++)
1314                 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1315                                               func_addr + (i << 2));
1316
1317         return size;
1318 }
1319
1320 static void
1321 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1322 {
1323         struct ecore_mcp_function_info *p_info;
1324         struct public_func shmem_info;
1325         u32 resp = 0, param = 0;
1326
1327         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1328
1329         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1330
1331         p_info = &p_hwfn->mcp_info->func_info;
1332
1333         ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1334
1335         ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1336
1337         /* Acknowledge the MFW */
1338         ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1339                       &param);
1340 }
1341
/* Handle an MFW fan-failure notification by forwarding it to the upper
 * driver as an ECORE_HW_ERR_FAN_FAIL event.
 */
static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt)
{
        /* A single notification should be sent to upper driver in CMT mode */
        if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
                return;

        DP_NOTICE(p_hwfn, false,
                  "Fan failure was detected on the network interface card"
                  " and it's going to be shut down.\n");

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
1355
1356 static enum _ecore_status_t
1357 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1358                     u32 mdump_cmd, union drv_union_data *p_data_src,
1359                     union drv_union_data *p_data_dst, u32 *p_mcp_resp)
1360 {
1361         struct ecore_mcp_mb_params mb_params;
1362         enum _ecore_status_t rc;
1363
1364         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1365         mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1366         mb_params.param = mdump_cmd;
1367         mb_params.p_data_src = p_data_src;
1368         mb_params.p_data_dst = p_data_dst;
1369         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1370         if (rc != ECORE_SUCCESS)
1371                 return rc;
1372
1373         *p_mcp_resp = mb_params.mcp_resp;
1374         if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1375                 DP_NOTICE(p_hwfn, false,
1376                           "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
1377                           mdump_cmd);
1378                 rc = ECORE_INVAL;
1379         }
1380
1381         return rc;
1382 }
1383
/* Acknowledge an mdump event to the MFW; the raw MFW response is discarded,
 * only the mailbox status is propagated.
 */
static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
{
        u32 mcp_resp;

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
                                   OSAL_NULL, OSAL_NULL, &mcp_resp);
}
1392
/* Set the mdump epoch value in the MFW.
 *
 * @param epoch - epoch value to record; sent in the raw-data area of the
 *                mailbox union.
 */
enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt,
                                                u32 epoch)
{
        union drv_union_data union_data;
        u32 mcp_resp;

        OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
                                   &union_data, OSAL_NULL, &mcp_resp);
}
1405
/* Ask the MFW to trigger an mdump (crash dump) collection.
 *
 * Side effect: enables mdump_en on the device so a subsequent critical-error
 * notification will not be acknowledged (see
 * ecore_mcp_handle_critical_error), allowing the dump to complete.
 */
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt)
{
        u32 mcp_resp;

        p_hwfn->p_dev->mdump_en = true;

        return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
                                   OSAL_NULL, OSAL_NULL, &mcp_resp);
}
1416
/* Query the MFW for the mdump configuration.
 *
 * @param p_mdump_config - out: filled from the MFW's response
 *
 * @return ECORE_SUCCESS; ECORE_NOTIMPL when the MFW does not support the
 *         command; ECORE_UNKNOWN_ERROR on an unexpected response code;
 *         or the mailbox error.
 */
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                           struct mdump_config_stc *p_mdump_config)
{
        union drv_union_data union_data;
        u32 mcp_resp;
        enum _ecore_status_t rc;

        rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
                                 OSAL_NULL, &union_data, &mcp_resp);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
                return ECORE_NOTIMPL;

        if (mcp_resp != FW_MSG_CODE_OK) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
                          mcp_resp);
                rc = ECORE_UNKNOWN_ERROR;
        }

        /* NOTE(review): the config is copied out even when mcp_resp is not
         * FW_MSG_CODE_OK and ECORE_UNKNOWN_ERROR is returned — presumably
         * intentional (best-effort data); confirm with the callers.
         */
        OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
                    sizeof(*p_mdump_config));

        return rc;
}
1445
/* Collect mdump information for the caller: the dump reason from the global
 * shmem section and, when a dump exists, the mdump configuration details.
 *
 * @param p_mdump_info - out: zeroed, then filled with reason and (when
 *                       reason != 0) version/config/epoch/log counters.
 *
 * @return ECORE_SUCCESS, or the error from the mdump config query.
 */
enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         struct ecore_mdump_info *p_mdump_info)
{
        u32 addr, global_offsize, global_addr;
        struct mdump_config_stc mdump_config;
        enum _ecore_status_t rc;

        OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

        /* The dump reason lives in the global public shmem section */
        addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                    PUBLIC_GLOBAL);
        global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        global_addr = SECTION_ADDR(global_offsize, 0);
        p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
                                        global_addr +
                                        OFFSETOF(struct public_global,
                                                 mdump_reason));

        /* A non-zero reason means a dump exists; fetch its details */
        if (p_mdump_info->reason) {
                rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_mdump_info->version = mdump_config.version;
                p_mdump_info->config = mdump_config.config;
                p_mdump_info->epoch = mdump_config.epoc;
                p_mdump_info->num_of_logs = mdump_config.num_of_logs;
                p_mdump_info->valid_logs = mdump_config.valid_logs;

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
                           p_mdump_info->reason, p_mdump_info->version,
                           p_mdump_info->config, p_mdump_info->epoch,
                           p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
        } else {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MFW mdump info: reason %d\n", p_mdump_info->reason);
        }

        return ECORE_SUCCESS;
}
1488
1489 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1490                                                 struct ecore_ptt *p_ptt)
1491 {
1492         u32 mcp_resp;
1493
1494         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
1495                                    OSAL_NULL, OSAL_NULL, &mcp_resp);
1496 }
1497
1498 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1499                                             struct ecore_ptt *p_ptt)
1500 {
1501         /* In CMT mode - no need for more than a single acknowledgment to the
1502          * MFW, and no more than a single notification to the upper driver.
1503          */
1504         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1505                 return;
1506
1507         DP_NOTICE(p_hwfn, false,
1508                   "Received a critical error notification from the MFW!\n");
1509
1510         if (p_hwfn->p_dev->mdump_en) {
1511                 DP_NOTICE(p_hwfn, false,
1512                           "Not acknowledging the notification to allow the MFW crash dump\n");
1513                 p_hwfn->p_dev->mdump_en = false;
1514                 return;
1515         }
1516
1517         ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1518         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1519 }
1520
/* Handle an attention from the MFW: read the fresh mailbox messages,
 * dispatch a handler for every message that differs from the shadow copy,
 * ack all messages back to the MFW, and update the shadow.
 *
 * @return ECORE_SUCCESS, or ECORE_INVAL when an unimplemented message was
 *         received or no new message was found despite the indication.
 */
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		/* The message index within the mailbox identifies the event */
		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
1614
1615 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1616                                            struct ecore_ptt *p_ptt,
1617                                            u32 *p_mfw_ver,
1618                                            u32 *p_running_bundle_id)
1619 {
1620         u32 global_offsize;
1621
1622 #ifndef ASIC_ONLY
1623         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1624                 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1625                 return ECORE_SUCCESS;
1626         }
1627 #endif
1628
1629         if (IS_VF(p_hwfn->p_dev)) {
1630                 if (p_hwfn->vf_iov_info) {
1631                         struct pfvf_acquire_resp_tlv *p_resp;
1632
1633                         p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1634                         *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1635                         return ECORE_SUCCESS;
1636                 } else {
1637                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1638                                    "VF requested MFW version prior to ACQUIRE\n");
1639                         return ECORE_INVAL;
1640                 }
1641         }
1642
1643         global_offsize = ecore_rd(p_hwfn, p_ptt,
1644                                   SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1645                                                        public_base,
1646                                                        PUBLIC_GLOBAL));
1647         *p_mfw_ver =
1648             ecore_rd(p_hwfn, p_ptt,
1649                      SECTION_ADDR(global_offsize,
1650                                   0) + OFFSETOF(struct public_global, mfw_ver));
1651
1652         if (p_running_bundle_id != OSAL_NULL) {
1653                 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1654                                                 SECTION_ADDR(global_offsize,
1655                                                              0) +
1656                                                 OFFSETOF(struct public_global,
1657                                                          running_bundle_id));
1658         }
1659
1660         return ECORE_SUCCESS;
1661 }
1662
1663 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1664                                               u32 *p_media_type)
1665 {
1666         struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1667         struct ecore_ptt *p_ptt;
1668
1669         /* TODO - Add support for VFs */
1670         if (IS_VF(p_dev))
1671                 return ECORE_INVAL;
1672
1673         if (!ecore_mcp_is_init(p_hwfn)) {
1674                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
1675                 return ECORE_BUSY;
1676         }
1677
1678         *p_media_type = MEDIA_UNSPECIFIED;
1679
1680         p_ptt = ecore_ptt_acquire(p_hwfn);
1681         if (!p_ptt)
1682                 return ECORE_BUSY;
1683
1684         *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1685                                  OFFSETOF(struct public_port, media_type));
1686
1687         ecore_ptt_release(p_hwfn, p_ptt);
1688
1689         return ECORE_SUCCESS;
1690 }
1691
1692 /* @DPDK */
1693 /* Old MFW has a global configuration for all PFs regarding RDMA support */
1694 static void
1695 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
1696                                  enum ecore_pci_personality *p_proto)
1697 {
1698         *p_proto = ECORE_PCI_ETH;
1699
1700         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1701                    "According to Legacy capabilities, L2 personality is %08x\n",
1702                    (u32)*p_proto);
1703 }
1704
1705 /* @DPDK */
1706 static enum _ecore_status_t
1707 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
1708                               struct ecore_ptt *p_ptt,
1709                               enum ecore_pci_personality *p_proto)
1710 {
1711         u32 resp = 0, param = 0;
1712         enum _ecore_status_t rc;
1713
1714         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1715                    "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
1716                    (u32)*p_proto, resp, param);
1717         return ECORE_SUCCESS;
1718 }
1719
1720 static enum _ecore_status_t
1721 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1722                           struct public_func *p_info,
1723                           struct ecore_ptt *p_ptt,
1724                           enum ecore_pci_personality *p_proto)
1725 {
1726         enum _ecore_status_t rc = ECORE_SUCCESS;
1727
1728         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1729         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1730                 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
1731                     ECORE_SUCCESS)
1732                         ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
1733                 break;
1734         default:
1735                 rc = ECORE_INVAL;
1736         }
1737
1738         return rc;
1739 }
1740
1741 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1742                                                     struct ecore_ptt *p_ptt)
1743 {
1744         struct ecore_mcp_function_info *info;
1745         struct public_func shmem_info;
1746
1747         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1748         info = &p_hwfn->mcp_info->func_info;
1749
1750         info->pause_on_host = (shmem_info.config &
1751                                FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1752
1753         if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1754                                       &info->protocol)) {
1755                 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1756                        (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1757                 return ECORE_INVAL;
1758         }
1759
1760         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1761
1762         if (shmem_info.mac_upper || shmem_info.mac_lower) {
1763                 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1764                 info->mac[1] = (u8)(shmem_info.mac_upper);
1765                 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1766                 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1767                 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1768                 info->mac[5] = (u8)(shmem_info.mac_lower);
1769         } else {
1770                 /* TODO - are there protocols for which there's no MAC? */
1771                 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1772         }
1773
1774         /* TODO - are these calculations true for BE machine? */
1775         info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
1776                          (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
1777         info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
1778                          (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
1779
1780         info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1781
1782         info->mtu = (u16)shmem_info.mtu_size;
1783
1784         if (info->mtu == 0)
1785                 info->mtu = 1500;
1786
1787         info->mtu = (u16)shmem_info.mtu_size;
1788
1789         DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1790                    "Read configuration from shmem: pause_on_host %02x"
1791                     " protocol %02x BW [%02x - %02x]"
1792                     " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
1793                     " node %lx ovlan %04x\n",
1794                    info->pause_on_host, info->protocol,
1795                    info->bandwidth_min, info->bandwidth_max,
1796                    info->mac[0], info->mac[1], info->mac[2],
1797                    info->mac[3], info->mac[4], info->mac[5],
1798                    (unsigned long)info->wwn_port,
1799                    (unsigned long)info->wwn_node, info->ovlan);
1800
1801         return ECORE_SUCCESS;
1802 }
1803
1804 struct ecore_mcp_link_params
1805 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1806 {
1807         if (!p_hwfn || !p_hwfn->mcp_info)
1808                 return OSAL_NULL;
1809         return &p_hwfn->mcp_info->link_input;
1810 }
1811
1812 struct ecore_mcp_link_state
1813 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1814 {
1815         if (!p_hwfn || !p_hwfn->mcp_info)
1816                 return OSAL_NULL;
1817
1818 #ifndef ASIC_ONLY
1819         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1820                 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1821                 p_hwfn->mcp_info->link_output.link_up = true;
1822         }
1823 #endif
1824
1825         return &p_hwfn->mcp_info->link_output;
1826 }
1827
1828 struct ecore_mcp_link_capabilities
1829 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1830 {
1831         if (!p_hwfn || !p_hwfn->mcp_info)
1832                 return OSAL_NULL;
1833         return &p_hwfn->mcp_info->link_capabilities;
1834 }
1835
1836 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1837                                      struct ecore_ptt *p_ptt)
1838 {
1839         u32 resp = 0, param = 0;
1840         enum _ecore_status_t rc;
1841
1842         rc = ecore_mcp_cmd(p_hwfn, p_ptt,
1843                            DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
1844
1845         /* Wait for the drain to complete before returning */
1846         OSAL_MSLEEP(1020);
1847
1848         return rc;
1849 }
1850
1851 const struct ecore_mcp_function_info
1852 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1853 {
1854         if (!p_hwfn || !p_hwfn->mcp_info)
1855                 return OSAL_NULL;
1856         return &p_hwfn->mcp_info->func_info;
1857 }
1858
1859 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1860                                            struct ecore_ptt *p_ptt,
1861                                            struct ecore_mcp_nvm_params *params)
1862 {
1863         enum _ecore_status_t rc;
1864
1865         switch (params->type) {
1866         case ECORE_MCP_NVM_RD:
1867                 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1868                                           params->nvm_common.offset,
1869                                           &params->nvm_common.resp,
1870                                           &params->nvm_common.param,
1871                                           params->nvm_rd.buf_size,
1872                                           params->nvm_rd.buf);
1873                 break;
1874         case ECORE_MCP_CMD:
1875                 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1876                                    params->nvm_common.offset,
1877                                    &params->nvm_common.resp,
1878                                    &params->nvm_common.param);
1879                 break;
1880         case ECORE_MCP_NVM_WR:
1881                 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1882                                           params->nvm_common.offset,
1883                                           &params->nvm_common.resp,
1884                                           &params->nvm_common.param,
1885                                           params->nvm_wr.buf_size,
1886                                           params->nvm_wr.buf);
1887                 break;
1888         default:
1889                 rc = ECORE_NOTIMPL;
1890                 break;
1891         }
1892         return rc;
1893 }
1894
1895 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1896                                   struct ecore_ptt *p_ptt, u32 personalities)
1897 {
1898         enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1899         struct public_func shmem_info;
1900         int i, count = 0, num_pfs;
1901
1902         num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1903
1904         for (i = 0; i < num_pfs; i++) {
1905                 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1906                                          MCP_PF_ID_BY_REL(p_hwfn, i));
1907                 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1908                         continue;
1909
1910                 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1911                                               &protocol) !=
1912                     ECORE_SUCCESS)
1913                         continue;
1914
1915                 if ((1 << ((u32)protocol)) & personalities)
1916                         count++;
1917         }
1918
1919         return count;
1920 }
1921
1922 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1923                                               struct ecore_ptt *p_ptt,
1924                                               u32 *p_flash_size)
1925 {
1926         u32 flash_size;
1927
1928 #ifndef ASIC_ONLY
1929         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1930                 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
1931                 return ECORE_INVAL;
1932         }
1933 #endif
1934
1935         if (IS_VF(p_hwfn->p_dev))
1936                 return ECORE_INVAL;
1937
1938         flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1939         flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1940             MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1941         flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1942
1943         *p_flash_size = flash_size;
1944
1945         return ECORE_SUCCESS;
1946 }
1947
1948 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1949                                                   struct ecore_ptt *p_ptt)
1950 {
1951         struct ecore_dev *p_dev = p_hwfn->p_dev;
1952
1953         if (p_dev->recov_in_prog) {
1954                 DP_NOTICE(p_hwfn, false,
1955                           "Avoid triggering a recovery since such a process"
1956                           " is already in progress\n");
1957                 return ECORE_AGAIN;
1958         }
1959
1960         DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
1961         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1962
1963         return ECORE_SUCCESS;
1964 }
1965
1966 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1967                                               struct ecore_ptt *p_ptt,
1968                                               u8 vf_id, u8 num)
1969 {
1970         u32 resp = 0, param = 0, rc_param = 0;
1971         enum _ecore_status_t rc;
1972
1973 /* Only Leader can configure MSIX, and need to take CMT into account */
1974
1975         if (!IS_LEAD_HWFN(p_hwfn))
1976                 return ECORE_SUCCESS;
1977         num *= p_hwfn->p_dev->num_hwfns;
1978
1979         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1980             DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1981         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1982             DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1983
1984         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1985                            &resp, &rc_param);
1986
1987         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1988                 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
1989                           vf_id);
1990                 rc = ECORE_INVAL;
1991         } else {
1992                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1993                            "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
1994                             num, vf_id);
1995         }
1996
1997         return rc;
1998 }
1999
/* Send the driver version to the MFW via the SET_VERSION mailbox command.
 * The driver name is converted dword-by-dword to big-endian, since that
 * is the byte order the MFW expects for the string.
 *
 * @return ECORE_SUCCESS, or the mailbox failure code.
 */
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 num_words, i;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	/* Non-ASIC platforms - nothing to report to the MFW */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		/* The driver name is expected to be in a big-endian format */
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
2036
2037 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2038                                     struct ecore_ptt *p_ptt)
2039 {
2040         enum _ecore_status_t rc;
2041         u32 resp = 0, param = 0;
2042
2043         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2044                            &param);
2045         if (rc != ECORE_SUCCESS)
2046                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2047
2048         return rc;
2049 }
2050
2051 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2052                                       struct ecore_ptt *p_ptt)
2053 {
2054         u32 value, cpu_mode;
2055
2056         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2057
2058         value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2059         value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2060         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2061         cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2062
2063         return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
2064 }
2065
2066 enum _ecore_status_t
2067 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2068                                    struct ecore_ptt *p_ptt,
2069                                    enum ecore_ov_client client)
2070 {
2071         enum _ecore_status_t rc;
2072         u32 resp = 0, param = 0;
2073         u32 drv_mb_param;
2074
2075         switch (client) {
2076         case ECORE_OV_CLIENT_DRV:
2077                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2078                 break;
2079         case ECORE_OV_CLIENT_USER:
2080                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2081                 break;
2082         case ECORE_OV_CLIENT_VENDOR_SPEC:
2083                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2084                 break;
2085         default:
2086                 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2087                 return ECORE_INVAL;
2088         }
2089
2090         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2091                            drv_mb_param, &resp, &param);
2092         if (rc != ECORE_SUCCESS)
2093                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2094
2095         return rc;
2096 }
2097
2098 enum _ecore_status_t
2099 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2100                                  struct ecore_ptt *p_ptt,
2101                                  enum ecore_ov_driver_state drv_state)
2102 {
2103         enum _ecore_status_t rc;
2104         u32 resp = 0, param = 0;
2105         u32 drv_mb_param;
2106
2107         switch (drv_state) {
2108         case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2109                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2110                 break;
2111         case ECORE_OV_DRIVER_STATE_DISABLED:
2112                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2113                 break;
2114         case ECORE_OV_DRIVER_STATE_ACTIVE:
2115                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2116                 break;
2117         default:
2118                 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2119                 return ECORE_INVAL;
2120         }
2121
2122         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2123                            drv_mb_param, &resp, &param);
2124         if (rc != ECORE_SUCCESS)
2125                 DP_ERR(p_hwfn, "Failed to send driver state\n");
2126
2127         return rc;
2128 }
2129
2130 enum _ecore_status_t
2131 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2132                          struct ecore_fc_npiv_tbl *p_table)
2133 {
2134         return 0;
2135 }
2136
2137 enum _ecore_status_t
2138 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
2139                         struct ecore_ptt *p_ptt, u16 mtu)
2140 {
2141         return 0;
2142 }
2143
2144 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2145                                        struct ecore_ptt *p_ptt,
2146                                        enum ecore_led_mode mode)
2147 {
2148         u32 resp = 0, param = 0, drv_mb_param;
2149         enum _ecore_status_t rc;
2150
2151         switch (mode) {
2152         case ECORE_LED_MODE_ON:
2153                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2154                 break;
2155         case ECORE_LED_MODE_OFF:
2156                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2157                 break;
2158         case ECORE_LED_MODE_RESTORE:
2159                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2160                 break;
2161         default:
2162                 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2163                 return ECORE_INVAL;
2164         }
2165
2166         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2167                            drv_mb_param, &resp, &param);
2168         if (rc != ECORE_SUCCESS)
2169                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2170
2171         return rc;
2172 }
2173
2174 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2175                                              struct ecore_ptt *p_ptt,
2176                                              u32 mask_parities)
2177 {
2178         enum _ecore_status_t rc;
2179         u32 resp = 0, param = 0;
2180
2181         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2182                            mask_parities, &resp, &param);
2183
2184         if (rc != ECORE_SUCCESS) {
2185                 DP_ERR(p_hwfn,
2186                        "MCP response failure for mask parities, aborting\n");
2187         } else if (resp != FW_MSG_CODE_OK) {
2188                 DP_ERR(p_hwfn,
2189                        "MCP did not ack mask parity request. Old MFW?\n");
2190                 rc = ECORE_INVAL;
2191         }
2192
2193         return rc;
2194 }
2195
/* Read `len` bytes from NVM offset `addr` into `p_buf`, splitting the
 * transfer into MCP_DRV_NVM_BUF_LEN-sized mailbox reads. The last MFW
 * response is latched in p_dev->mcp_nvm_resp for ecore_mcp_nvm_resp().
 *
 * @return ECORE_SUCCESS, or the first failing chunk's status.
 */
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	bytes_left = len;
	offset = 0;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		/* The mailbox offset parameter packs both the NVM address
		 * and the chunk length (in its upper bits).
		 */
		params.nvm_common.offset = (addr + offset) |
		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
					    FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		/* The modulo comparison detects that this chunk crossed a
		 * 0x1000-byte boundary of the remaining count.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		/* Advance by the number of bytes the MFW actually returned */
		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
2244
2245 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2246                                         u32 addr, u8 *p_buf, u32 len)
2247 {
2248         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2249         struct ecore_mcp_nvm_params params;
2250         struct ecore_ptt *p_ptt;
2251         enum _ecore_status_t rc;
2252
2253         p_ptt = ecore_ptt_acquire(p_hwfn);
2254         if (!p_ptt)
2255                 return ECORE_BUSY;
2256
2257         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2258         params.type = ECORE_MCP_NVM_RD;
2259         params.nvm_rd.buf_size = &len;
2260         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2261             DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
2262         params.nvm_common.offset = addr;
2263         params.nvm_rd.buf = (u32 *)p_buf;
2264         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2265         if (rc != ECORE_SUCCESS)
2266                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2267
2268         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2269         ecore_ptt_release(p_hwfn, p_ptt);
2270
2271         return rc;
2272 }
2273
2274 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2275 {
2276         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2277         struct ecore_mcp_nvm_params params;
2278         struct ecore_ptt *p_ptt;
2279
2280         p_ptt = ecore_ptt_acquire(p_hwfn);
2281         if (!p_ptt)
2282                 return ECORE_BUSY;
2283
2284         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2285         OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2286         ecore_ptt_release(p_hwfn, p_ptt);
2287
2288         return ECORE_SUCCESS;
2289 }
2290
2291 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2292 {
2293         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2294         struct ecore_mcp_nvm_params params;
2295         struct ecore_ptt *p_ptt;
2296         enum _ecore_status_t rc;
2297
2298         p_ptt = ecore_ptt_acquire(p_hwfn);
2299         if (!p_ptt)
2300                 return ECORE_BUSY;
2301         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2302         params.type = ECORE_MCP_CMD;
2303         params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
2304         params.nvm_common.offset = addr;
2305         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2306         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2307         ecore_ptt_release(p_hwfn, p_ptt);
2308
2309         return rc;
2310 }
2311
2312 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2313                                                   u32 addr)
2314 {
2315         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2316         struct ecore_mcp_nvm_params params;
2317         struct ecore_ptt *p_ptt;
2318         enum _ecore_status_t rc;
2319
2320         p_ptt = ecore_ptt_acquire(p_hwfn);
2321         if (!p_ptt)
2322                 return ECORE_BUSY;
2323         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2324         params.type = ECORE_MCP_CMD;
2325         params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2326         params.nvm_common.offset = addr;
2327         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2328         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2329         ecore_ptt_release(p_hwfn, p_ptt);
2330
2331         return rc;
2332 }
2333
2334 /* rc receives ECORE_INVAL as default parameter because
2335  * it might not enter the while loop if the len is 0
2336  */
2337 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2338                                          u32 addr, u8 *p_buf, u32 len)
2339 {
2340         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2341         enum _ecore_status_t rc = ECORE_INVAL;
2342         struct ecore_mcp_nvm_params params;
2343         struct ecore_ptt *p_ptt;
2344         u32 buf_idx, buf_size;
2345
2346         p_ptt = ecore_ptt_acquire(p_hwfn);
2347         if (!p_ptt)
2348                 return ECORE_BUSY;
2349
2350         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2351         params.type = ECORE_MCP_NVM_WR;
2352         if (cmd == ECORE_PUT_FILE_DATA)
2353                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2354         else
2355                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2356         buf_idx = 0;
2357         while (buf_idx < len) {
2358                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2359                                       MCP_DRV_NVM_BUF_LEN);
2360                 params.nvm_common.offset = ((buf_size <<
2361                                              DRV_MB_PARAM_NVM_LEN_SHIFT)
2362                                             | addr) + buf_idx;
2363                 params.nvm_wr.buf_size = buf_size;
2364                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2365                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2366                 if (rc != ECORE_SUCCESS ||
2367                     ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
2368                      (params.nvm_common.resp !=
2369                       FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
2370                         DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2371
2372                 /* This can be a lengthy process, and it's possible scheduler
2373                  * isn't preemptible. Sleep a bit to prevent CPU hogging.
2374                  */
2375                 if (buf_idx % 0x1000 >
2376                     (buf_idx + buf_size) % 0x1000)
2377                         OSAL_MSLEEP(1);
2378
2379                 buf_idx += buf_size;
2380         }
2381
2382         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2383         ecore_ptt_release(p_hwfn, p_ptt);
2384
2385         return rc;
2386 }
2387
2388 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2389                                          u32 addr, u8 *p_buf, u32 len)
2390 {
2391         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2392         struct ecore_mcp_nvm_params params;
2393         struct ecore_ptt *p_ptt;
2394         enum _ecore_status_t rc;
2395
2396         p_ptt = ecore_ptt_acquire(p_hwfn);
2397         if (!p_ptt)
2398                 return ECORE_BUSY;
2399
2400         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2401         params.type = ECORE_MCP_NVM_WR;
2402         params.nvm_wr.buf_size = len;
2403         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
2404             DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
2405         params.nvm_common.offset = addr;
2406         params.nvm_wr.buf = (u32 *)p_buf;
2407         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2408         if (rc != ECORE_SUCCESS)
2409                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2410         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2411         ecore_ptt_release(p_hwfn, p_ptt);
2412
2413         return rc;
2414 }
2415
2416 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2417                                                    u32 addr)
2418 {
2419         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2420         struct ecore_mcp_nvm_params params;
2421         struct ecore_ptt *p_ptt;
2422         enum _ecore_status_t rc;
2423
2424         p_ptt = ecore_ptt_acquire(p_hwfn);
2425         if (!p_ptt)
2426                 return ECORE_BUSY;
2427
2428         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2429         params.type = ECORE_MCP_CMD;
2430         params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
2431         params.nvm_common.offset = addr;
2432         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2433         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2434         ecore_ptt_release(p_hwfn, p_ptt);
2435
2436         return rc;
2437 }
2438
2439 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2440                                             struct ecore_ptt *p_ptt,
2441                                             u32 port, u32 addr, u32 offset,
2442                                             u32 len, u8 *p_buf)
2443 {
2444         struct ecore_mcp_nvm_params params;
2445         enum _ecore_status_t rc;
2446         u32 bytes_left, bytes_to_copy, buf_size;
2447
2448         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2449         params.nvm_common.offset =
2450                 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2451                 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2452         addr = offset;
2453         offset = 0;
2454         bytes_left = len;
2455         params.type = ECORE_MCP_NVM_RD;
2456         params.nvm_rd.buf_size = &buf_size;
2457         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
2458         while (bytes_left > 0) {
2459                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2460                                            MAX_I2C_TRANSACTION_SIZE);
2461                 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2462                 params.nvm_common.offset &=
2463                         (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2464                          DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2465                 params.nvm_common.offset |=
2466                         ((addr + offset) <<
2467                          DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2468                 params.nvm_common.offset |=
2469                         (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2470                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2471                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2472                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2473                         return ECORE_NODEV;
2474                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2475                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2476                         return ECORE_UNKNOWN_ERROR;
2477
2478                 offset += *params.nvm_rd.buf_size;
2479                 bytes_left -= *params.nvm_rd.buf_size;
2480         }
2481
2482         return ECORE_SUCCESS;
2483 }
2484
2485 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2486                                              struct ecore_ptt *p_ptt,
2487                                              u32 port, u32 addr, u32 offset,
2488                                              u32 len, u8 *p_buf)
2489 {
2490         struct ecore_mcp_nvm_params params;
2491         enum _ecore_status_t rc;
2492         u32 buf_idx, buf_size;
2493
2494         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2495         params.nvm_common.offset =
2496                 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2497                 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2498         params.type = ECORE_MCP_NVM_WR;
2499         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
2500         buf_idx = 0;
2501         while (buf_idx < len) {
2502                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2503                                       MAX_I2C_TRANSACTION_SIZE);
2504                 params.nvm_common.offset &=
2505                         (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2506                          DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2507                 params.nvm_common.offset |=
2508                         ((offset + buf_idx) <<
2509                          DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2510                 params.nvm_common.offset |=
2511                         (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2512                 params.nvm_wr.buf_size = buf_size;
2513                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2514                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2515                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2516                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2517                         return ECORE_NODEV;
2518                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2519                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2520                         return ECORE_UNKNOWN_ERROR;
2521
2522                 buf_idx += buf_size;
2523         }
2524
2525         return ECORE_SUCCESS;
2526 }
2527
2528 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2529                                          struct ecore_ptt *p_ptt,
2530                                          u16 gpio, u32 *gpio_val)
2531 {
2532         enum _ecore_status_t rc = ECORE_SUCCESS;
2533         u32 drv_mb_param = 0, rsp;
2534
2535         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
2536
2537         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
2538                            drv_mb_param, &rsp, gpio_val);
2539
2540         if (rc != ECORE_SUCCESS)
2541                 return rc;
2542
2543         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2544                 return ECORE_UNKNOWN_ERROR;
2545
2546         return ECORE_SUCCESS;
2547 }
2548
2549 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2550                                           struct ecore_ptt *p_ptt,
2551                                           u16 gpio, u16 gpio_val)
2552 {
2553         enum _ecore_status_t rc = ECORE_SUCCESS;
2554         u32 drv_mb_param = 0, param, rsp;
2555
2556         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
2557                 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
2558
2559         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2560                            drv_mb_param, &rsp, &param);
2561
2562         if (rc != ECORE_SUCCESS)
2563                 return rc;
2564
2565         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2566                 return ECORE_UNKNOWN_ERROR;
2567
2568         return ECORE_SUCCESS;
2569 }
2570
2571 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2572                                          struct ecore_ptt *p_ptt,
2573                                          u16 gpio, u32 *gpio_direction,
2574                                          u32 *gpio_ctrl)
2575 {
2576         u32 drv_mb_param = 0, rsp, val = 0;
2577         enum _ecore_status_t rc = ECORE_SUCCESS;
2578
2579         drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
2580
2581         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2582                            drv_mb_param, &rsp, &val);
2583         if (rc != ECORE_SUCCESS)
2584                 return rc;
2585
2586         *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2587                            DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
2588         *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2589                       DRV_MB_PARAM_GPIO_CTRL_SHIFT;
2590
2591         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2592                 return ECORE_UNKNOWN_ERROR;
2593
2594         return ECORE_SUCCESS;
2595 }
2596
2597 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2598                                                   struct ecore_ptt *p_ptt)
2599 {
2600         u32 drv_mb_param = 0, rsp, param;
2601         enum _ecore_status_t rc = ECORE_SUCCESS;
2602
2603         drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2604                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2605
2606         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2607                            drv_mb_param, &rsp, &param);
2608
2609         if (rc != ECORE_SUCCESS)
2610                 return rc;
2611
2612         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2613             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2614                 rc = ECORE_UNKNOWN_ERROR;
2615
2616         return rc;
2617 }
2618
2619 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2620                                                struct ecore_ptt *p_ptt)
2621 {
2622         u32 drv_mb_param, rsp, param;
2623         enum _ecore_status_t rc = ECORE_SUCCESS;
2624
2625         drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
2626                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2627
2628         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2629                            drv_mb_param, &rsp, &param);
2630
2631         if (rc != ECORE_SUCCESS)
2632                 return rc;
2633
2634         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2635             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2636                 rc = ECORE_UNKNOWN_ERROR;
2637
2638         return rc;
2639 }
2640
2641 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
2642         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
2643 {
2644         u32 drv_mb_param = 0, rsp;
2645         enum _ecore_status_t rc = ECORE_SUCCESS;
2646
2647         drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
2648                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2649
2650         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2651                            drv_mb_param, &rsp, num_images);
2652
2653         if (rc != ECORE_SUCCESS)
2654                 return rc;
2655
2656         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
2657                 rc = ECORE_UNKNOWN_ERROR;
2658
2659         return rc;
2660 }
2661
2662 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
2663         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2664         struct bist_nvm_image_att *p_image_att, u32 image_index)
2665 {
2666         struct ecore_mcp_nvm_params params;
2667         enum _ecore_status_t rc;
2668         u32 buf_size;
2669
2670         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2671         params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
2672                                     DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2673         params.nvm_common.offset |= (image_index <<
2674                                     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
2675
2676         params.type = ECORE_MCP_NVM_RD;
2677         params.nvm_rd.buf_size = &buf_size;
2678         params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
2679         params.nvm_rd.buf = (u32 *)p_image_att;
2680
2681         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2682         if (rc != ECORE_SUCCESS)
2683                 return rc;
2684
2685         if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2686             (p_image_att->return_code != 1))
2687                 rc = ECORE_UNKNOWN_ERROR;
2688
2689         return rc;
2690 }
2691
2692 enum _ecore_status_t
2693 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
2694                                struct ecore_ptt *p_ptt,
2695                                struct ecore_temperature_info *p_temp_info)
2696 {
2697         struct ecore_temperature_sensor *p_temp_sensor;
2698         struct temperature_status_stc *p_mfw_temp_info;
2699         struct ecore_mcp_mb_params mb_params;
2700         union drv_union_data union_data;
2701         u32 val;
2702         enum _ecore_status_t rc;
2703         u8 i;
2704
2705         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2706         mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
2707         mb_params.p_data_dst = &union_data;
2708         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2709         if (rc != ECORE_SUCCESS)
2710                 return rc;
2711
2712         p_mfw_temp_info = &union_data.temp_info;
2713
2714         OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
2715         p_temp_info->num_sensors = OSAL_MIN_T(u32,
2716                                               p_mfw_temp_info->num_of_sensors,
2717                                               ECORE_MAX_NUM_OF_SENSORS);
2718         for (i = 0; i < p_temp_info->num_sensors; i++) {
2719                 val = p_mfw_temp_info->sensor[i];
2720                 p_temp_sensor = &p_temp_info->sensors[i];
2721                 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
2722                                                  SENSOR_LOCATION_SHIFT;
2723                 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
2724                                                 THRESHOLD_HIGH_SHIFT;
2725                 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
2726                                           CRITICAL_TEMPERATURE_SHIFT;
2727                 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
2728                                               CURRENT_TEMP_SHIFT;
2729         }
2730
2731         return ECORE_SUCCESS;
2732 }
2733
2734 enum _ecore_status_t ecore_mcp_get_mba_versions(
2735         struct ecore_hwfn *p_hwfn,
2736         struct ecore_ptt *p_ptt,
2737         struct ecore_mba_vers *p_mba_vers)
2738 {
2739         struct ecore_mcp_nvm_params params;
2740         enum _ecore_status_t rc;
2741         u32 buf_size;
2742
2743         OSAL_MEM_ZERO(&params, sizeof(params));
2744         params.type = ECORE_MCP_NVM_RD;
2745         params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
2746         params.nvm_common.offset = 0;
2747         params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
2748         params.nvm_rd.buf_size = &buf_size;
2749         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2750
2751         if (rc != ECORE_SUCCESS)
2752                 return rc;
2753
2754         if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2755             FW_MSG_CODE_NVM_OK)
2756                 rc = ECORE_UNKNOWN_ERROR;
2757
2758         if (buf_size != MCP_DRV_NVM_BUF_LEN)
2759                 rc = ECORE_UNKNOWN_ERROR;
2760
2761         return rc;
2762 }
2763
2764 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
2765                                               struct ecore_ptt *p_ptt,
2766                                               u64 *num_events)
2767 {
2768         u32 rsp;
2769
2770         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
2771                              0, &rsp, (u32 *)num_events);
2772 }
2773
2774 static enum resource_id_enum
2775 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
2776 {
2777         enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
2778
2779         switch (res_id) {
2780         case ECORE_SB:
2781                 mfw_res_id = RESOURCE_NUM_SB_E;
2782                 break;
2783         case ECORE_L2_QUEUE:
2784                 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
2785                 break;
2786         case ECORE_VPORT:
2787                 mfw_res_id = RESOURCE_NUM_VPORT_E;
2788                 break;
2789         case ECORE_RSS_ENG:
2790                 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
2791                 break;
2792         case ECORE_PQ:
2793                 mfw_res_id = RESOURCE_NUM_PQ_E;
2794                 break;
2795         case ECORE_RL:
2796                 mfw_res_id = RESOURCE_NUM_RL_E;
2797                 break;
2798         case ECORE_MAC:
2799         case ECORE_VLAN:
2800                 /* Each VFC resource can accommodate both a MAC and a VLAN */
2801                 mfw_res_id = RESOURCE_VFC_FILTER_E;
2802                 break;
2803         case ECORE_ILT:
2804                 mfw_res_id = RESOURCE_ILT_E;
2805                 break;
2806         case ECORE_LL2_QUEUE:
2807                 mfw_res_id = RESOURCE_LL2_QUEUE_E;
2808                 break;
2809         case ECORE_RDMA_CNQ_RAM:
2810         case ECORE_CMDQS_CQS:
2811                 /* CNQ/CMDQS are the same resource */
2812                 mfw_res_id = RESOURCE_CQS_E;
2813                 break;
2814         case ECORE_RDMA_STATS_QUEUE:
2815                 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
2816                 break;
2817         case ECORE_BDQ:
2818                 mfw_res_id = RESOURCE_BDQ_E;
2819                 break;
2820         default:
2821                 break;
2822         }
2823
2824         return mfw_res_id;
2825 }
2826
2827 #define ECORE_RESC_ALLOC_VERSION_MAJOR  2
2828 #define ECORE_RESC_ALLOC_VERSION_MINOR  0
2829 #define ECORE_RESC_ALLOC_VERSION                                \
2830         ((ECORE_RESC_ALLOC_VERSION_MAJOR <<                     \
2831           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |    \
2832          (ECORE_RESC_ALLOC_VERSION_MINOR <<                     \
2833           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
2834
/* Input of a resource-allocation mailbox exchange with the MFW */
struct ecore_resc_alloc_in_params {
	u32 cmd;		/* DRV_MSG_{GET,SET}_RESOURCE_* command */
	enum ecore_resources res_id;	/* driver-side resource identifier */
	u32 resc_max_val;	/* requested max value (SET command only) */
};
2840
/* Output of a resource-allocation mailbox exchange with the MFW */
struct ecore_resc_alloc_out_params {
	u32 mcp_resp;		/* raw mailbox response code */
	u32 mcp_param;		/* raw mailbox response parameter */
	u32 resc_num;		/* number of resources granted to the PF */
	u32 resc_start;		/* first resource index granted to the PF */
	u32 vf_resc_num;	/* number of resources granted per VF */
	u32 vf_resc_start;	/* first resource index granted to VFs */
	u32 flags;		/* MFW-provided resource flags */
};
2850
/* Perform a resource-allocation GET/SET exchange with the MFW.
 * Translates the driver resource id to the MFW enum, sends the request
 * together with the HSI version, and unpacks the MFW's reply into
 * p_out_params. Returns ECORE_INVAL on an unknown resource or command, or
 * the mailbox error code on a failed exchange.
 */
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct ecore_resc_alloc_in_params *p_in_params,
			      struct ecore_resc_alloc_out_params *p_out_params)
{
	struct resource_info *p_mfw_resc_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	p_mfw_resc_info = &union_data.resource;
	OSAL_MEM_ZERO(p_mfw_resc_info, sizeof(*p_mfw_resc_info));

	p_mfw_resc_info->res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
	if (p_mfw_resc_info->res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       ecore_hw_get_resc_name(p_in_params->res_id));
		return ECORE_INVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		/* Only the SET command carries a value toward the MFW */
		p_mfw_resc_info->size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	/* The same union buffer is used for both directions: the request is
	 * sent from it and the MFW's reply is written back into it.
	 */
	mb_params.p_data_src = &union_data;
	mb_params.p_data_dst = &union_data;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd, p_in_params->res_id,
		   ecore_hw_get_resc_name(p_in_params->res_id),
		   ECORE_MFW_GET_FIELD(mb_params.param,
			   DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   ECORE_MFW_GET_FIELD(mb_params.param,
			   DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Unpack the MFW's reply from the shared union buffer */
	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = p_mfw_resc_info->size;
	p_out_params->resc_start = p_mfw_resc_info->offset;
	p_out_params->vf_resc_num = p_mfw_resc_info->vf_size;
	p_out_params->vf_resc_start = p_mfw_resc_info->vf_offset;
	p_out_params->flags = p_mfw_resc_info->flags;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
			   FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
			   FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num, p_out_params->resc_start,
		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
		   p_out_params->flags);

	return ECORE_SUCCESS;
}
2926
2927 enum _ecore_status_t
2928 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2929                            enum ecore_resources res_id, u32 resc_max_val,
2930                            u32 *p_mcp_resp)
2931 {
2932         struct ecore_resc_alloc_out_params out_params;
2933         struct ecore_resc_alloc_in_params in_params;
2934         enum _ecore_status_t rc;
2935
2936         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
2937         in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
2938         in_params.res_id = res_id;
2939         in_params.resc_max_val = resc_max_val;
2940         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
2941         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
2942                                            &out_params);
2943         if (rc != ECORE_SUCCESS)
2944                 return rc;
2945
2946         *p_mcp_resp = out_params.mcp_resp;
2947
2948         return ECORE_SUCCESS;
2949 }
2950
2951 enum _ecore_status_t
2952 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2953                         enum ecore_resources res_id, u32 *p_mcp_resp,
2954                         u32 *p_resc_num, u32 *p_resc_start)
2955 {
2956         struct ecore_resc_alloc_out_params out_params;
2957         struct ecore_resc_alloc_in_params in_params;
2958         enum _ecore_status_t rc;
2959
2960         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
2961         in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
2962         in_params.res_id = res_id;
2963         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
2964         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
2965                                            &out_params);
2966         if (rc != ECORE_SUCCESS)
2967                 return rc;
2968
2969         *p_mcp_resp = out_params.mcp_resp;
2970
2971         if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
2972                 *p_resc_num = out_params.resc_num;
2973                 *p_resc_start = out_params.resc_start;
2974         }
2975
2976         return ECORE_SUCCESS;
2977 }
2978
2979 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
2980                                                struct ecore_ptt *p_ptt)
2981 {
2982         u32 mcp_resp, mcp_param;
2983
2984         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
2985                              &mcp_resp, &mcp_param);
2986 }
2987
2988 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
2989                                                    struct ecore_ptt *p_ptt,
2990                                                    u32 param, u32 *p_mcp_resp,
2991                                                    u32 *p_mcp_param)
2992 {
2993         enum _ecore_status_t rc;
2994
2995         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
2996                            p_mcp_resp, p_mcp_param);
2997         if (rc != ECORE_SUCCESS)
2998                 return rc;
2999
3000         if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3001                 DP_INFO(p_hwfn,
3002                         "The resource command is unsupported by the MFW\n");
3003                 return ECORE_NOTIMPL;
3004         }
3005
3006         if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3007                 u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3008
3009                 DP_NOTICE(p_hwfn, false,
3010                           "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3011                           param, opcode);
3012                 return ECORE_INVAL;
3013         }
3014
3015         return rc;
3016 }
3017
3018 enum _ecore_status_t
3019 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3020                       struct ecore_resc_lock_params *p_params)
3021 {
3022         u32 param = 0, mcp_resp, mcp_param;
3023         u8 opcode;
3024         enum _ecore_status_t rc;
3025
3026         switch (p_params->timeout) {
3027         case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3028                 opcode = RESOURCE_OPCODE_REQ;
3029                 p_params->timeout = 0;
3030                 break;
3031         case ECORE_MCP_RESC_LOCK_TO_NONE:
3032                 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3033                 p_params->timeout = 0;
3034                 break;
3035         default:
3036                 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3037                 break;
3038         }
3039
3040         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3041         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3042         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3043
3044         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3045                    "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3046                    param, p_params->timeout, opcode, p_params->resource);
3047
3048         /* Attempt to acquire the resource */
3049         rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3050                                     &mcp_param);
3051         if (rc != ECORE_SUCCESS)
3052                 return rc;
3053
3054         /* Analyze the response */
3055         p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
3056                                              RESOURCE_CMD_RSP_OWNER);
3057         opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3058
3059         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3060                    "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3061                    mcp_param, opcode, p_params->owner);
3062
3063         switch (opcode) {
3064         case RESOURCE_OPCODE_GNT:
3065                 p_params->b_granted = true;
3066                 break;
3067         case RESOURCE_OPCODE_BUSY:
3068                 p_params->b_granted = false;
3069                 break;
3070         default:
3071                 DP_NOTICE(p_hwfn, false,
3072                           "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3073                           mcp_param, opcode);
3074                 return ECORE_INVAL;
3075         }
3076
3077         return ECORE_SUCCESS;
3078 }
3079
3080 enum _ecore_status_t
3081 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3082                     struct ecore_resc_lock_params *p_params)
3083 {
3084         u32 retry_cnt = 0;
3085         enum _ecore_status_t rc;
3086
3087         do {
3088                 /* No need for an interval before the first iteration */
3089                 if (retry_cnt) {
3090                         if (p_params->sleep_b4_retry) {
3091                                 u16 retry_interval_in_ms =
3092                                         DIV_ROUND_UP(p_params->retry_interval,
3093                                                      1000);
3094
3095                                 OSAL_MSLEEP(retry_interval_in_ms);
3096                         } else {
3097                                 OSAL_UDELAY(p_params->retry_interval);
3098                         }
3099                 }
3100
3101                 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3102                 if (rc != ECORE_SUCCESS)
3103                         return rc;
3104
3105                 if (p_params->b_granted)
3106                         break;
3107         } while (retry_cnt++ < p_params->retry_num);
3108
3109         return ECORE_SUCCESS;
3110 }
3111
3112 enum _ecore_status_t
3113 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3114                       struct ecore_resc_unlock_params *p_params)
3115 {
3116         u32 param = 0, mcp_resp, mcp_param;
3117         u8 opcode;
3118         enum _ecore_status_t rc;
3119
3120         opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3121                                    : RESOURCE_OPCODE_RELEASE;
3122         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3123         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3124
3125         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3126                    "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3127                    param, opcode, p_params->resource);
3128
3129         /* Attempt to release the resource */
3130         rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3131                                     &mcp_param);
3132         if (rc != ECORE_SUCCESS)
3133                 return rc;
3134
3135         /* Analyze the response */
3136         opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3137
3138         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3139                    "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3140                    mcp_param, opcode);
3141
3142         switch (opcode) {
3143         case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3144                 DP_INFO(p_hwfn,
3145                         "Resource unlock request for an already released resource [%d]\n",
3146                         p_params->resource);
3147                 /* Fallthrough */
3148         case RESOURCE_OPCODE_RELEASED:
3149                 p_params->b_released = true;
3150                 break;
3151         case RESOURCE_OPCODE_WRONG_OWNER:
3152                 p_params->b_released = false;
3153                 break;
3154         default:
3155                 DP_NOTICE(p_hwfn, false,
3156                           "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3157                           mcp_param, opcode);
3158                 return ECORE_INVAL;
3159         }
3160
3161         return ECORE_SUCCESS;
3162 }