net/qede/base: set max values for soft resources
[dpdk.git] / drivers / net / qede / base / ecore_mcp.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_vf.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23
/* Mailbox response polling interval [usec]: real silicon vs emulation */
24 #define CHIP_MCP_RESP_ITER_US 10
25 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
26
27 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)   /* Account for 5 sec */
28 #define ECORE_MCP_RESET_RETRIES (50 * 1000)     /* Account for 500 msec */
29
/* Raw write/read of a dword at a given offset inside one of this PF's
 * mailbox windows (_ptr names an address field cached in mcp_info).
 */
30 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
31         ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
32                  _val)
33
34 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
35         ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
36
/* Write a named field of this PF's driver mailbox.
 * Fix: the original expansion referenced the caller-scope identifier
 * p_hwfn instead of the macro parameter _p_hwfn, so the macro only
 * worked when the caller's variable happened to be named p_hwfn.
 * Route the parameter through to make the macro argument-hygienic;
 * every existing call site passes p_hwfn, so behavior is unchanged.
 */
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)
40
/* Read a named field of this PF's driver mailbox */
41 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
42         DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
43                      OFFSETOF(struct public_drv_mb, _field))
44
/* FW major/minor version packed into the PDA-compatibility field of the
 * LOAD_REQ param word.
 */
45 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
46         DRV_ID_PDA_COMP_VER_SHIFT)
47
/* Shift converting Mbit to bytes: 2^20 bits / 8 = 2^17 bytes */
48 #define MCP_BYTES_PER_MBIT_SHIFT 17
49
50 #ifndef ASIC_ONLY
/* Emulation-only bookkeeping of how many functions have loaded, overall
 * and per port; used to fake the MFW load-phase decision when no MFW
 * is present.
 */
51 static int loaded;
52 static int loaded_port[MAX_NUM_PORTS] = { 0 };
53 #endif
54
55 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
56 {
57         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
58                 return false;
59         return true;
60 }
61
62 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
63 {
64         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
65                                         PUBLIC_PORT);
66         u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
67
68         p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
69                                                    MFW_PORT(p_hwfn));
70         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
71                    "port_addr = 0x%x, port_id 0x%02x\n",
72                    p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
73 }
74
75 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
76 {
77         u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
78         OSAL_BE32 tmp;
79         u32 i;
80
81 #ifndef ASIC_ONLY
82         if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
83                 return;
84 #endif
85
86         if (!p_hwfn->mcp_info->public_base)
87                 return;
88
89         for (i = 0; i < length; i++) {
90                 tmp = ecore_rd(p_hwfn, p_ptt,
91                                p_hwfn->mcp_info->mfw_mb_addr +
92                                (i << 2) + sizeof(u32));
93
94                 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
95                     OSAL_BE32_TO_CPU(tmp);
96         }
97 }
98
99 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
100 {
101         if (p_hwfn->mcp_info) {
102                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
103                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
104                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
105         }
106         OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
107
108         return ECORE_SUCCESS;
109 }
110
111 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
112                                                    struct ecore_ptt *p_ptt)
113 {
114         struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
115         u32 drv_mb_offsize, mfw_mb_offsize;
116         u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
117
118 #ifndef ASIC_ONLY
119         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
120                 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
121                 p_info->public_base = 0;
122                 return ECORE_INVAL;
123         }
124 #endif
125
126         p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
127         if (!p_info->public_base)
128                 return ECORE_INVAL;
129
130         p_info->public_base |= GRCBASE_MCP;
131
132         /* Calculate the driver and MFW mailbox address */
133         drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
134                                   SECTION_OFFSIZE_ADDR(p_info->public_base,
135                                                        PUBLIC_DRV_MB));
136         p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
137         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
138                    "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
139                    " mcp_pf_id = 0x%x\n",
140                    drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
141
142         /* Set the MFW MB address */
143         mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
144                                   SECTION_OFFSIZE_ADDR(p_info->public_base,
145                                                        PUBLIC_MFW_MB));
146         p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
147         p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
148                                                p_info->mfw_mb_addr);
149
150         /* Get the current driver mailbox sequence before sending
151          * the first command
152          */
153         p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
154             DRV_MSG_SEQ_NUMBER_MASK;
155
156         /* Get current FW pulse sequence */
157         p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
158             DRV_PULSE_SEQ_MASK;
159
160         p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
161                                           MISCS_REG_GENERIC_POR_0);
162
163         return ECORE_SUCCESS;
164 }
165
166 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
167                                         struct ecore_ptt *p_ptt)
168 {
169         struct ecore_mcp_info *p_info;
170         u32 size;
171
172         /* Allocate mcp_info structure */
173         p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
174                                        sizeof(*p_hwfn->mcp_info));
175         if (!p_hwfn->mcp_info)
176                 goto err;
177         p_info = p_hwfn->mcp_info;
178
179         if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
180                 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
181                 /* Do not free mcp_info here, since public_base indicate that
182                  * the MCP is not initialized
183                  */
184                 return ECORE_SUCCESS;
185         }
186
187         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
188         p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189         p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
190         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
191                 goto err;
192
193         /* Initialize the MFW spinlock */
194         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
195         OSAL_SPIN_LOCK_INIT(&p_info->lock);
196
197         return ECORE_SUCCESS;
198
199 err:
200         DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
201         ecore_mcp_free(p_hwfn);
202         return ECORE_NOMEM;
203 }
204
205 /* Locks the MFW mailbox of a PF to ensure a single access.
206  * The lock is achieved in most cases by holding a spinlock, causing other
207  * threads to wait till a previous access is done.
208  * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
209  * access is achieved by setting a blocking flag, which will fail other
210  * competing contexts to send their mailboxes.
211  */
212 static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
213                                               u32 cmd)
214 {
215         OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
216
217         /* The spinlock shouldn't be acquired when the mailbox command is
218          * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
219          * pending [UN]LOAD_REQ command of another PF together with a spinlock
220          * (i.e. interrupts are disabled) - can lead to a deadlock.
221          * It is assumed that for a single PF, no other mailbox commands can be
222          * sent from another context while sending LOAD_REQ, and that any
223          * parallel commands to UNLOAD_REQ can be cancelled.
224          */
225         if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
226                 p_hwfn->mcp_info->block_mb_sending = false;
227
228         if (p_hwfn->mcp_info->block_mb_sending) {
229                 DP_NOTICE(p_hwfn, false,
230                           "Trying to send a MFW mailbox command [0x%x]"
231                           " in parallel to [UN]LOAD_REQ. Aborting.\n",
232                           cmd);
233                 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
234                 return ECORE_BUSY;
235         }
236
237         if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
238                 p_hwfn->mcp_info->block_mb_sending = true;
239                 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
240         }
241
242         return ECORE_SUCCESS;
243 }
244
245 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
246 {
247         if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
248                 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
249 }
250
251 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
252                                      struct ecore_ptt *p_ptt)
253 {
254         u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
255         u32 delay = CHIP_MCP_RESP_ITER_US;
256         u32 org_mcp_reset_seq, cnt = 0;
257         enum _ecore_status_t rc = ECORE_SUCCESS;
258
259 #ifndef ASIC_ONLY
260         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
261                 delay = EMUL_MCP_RESP_ITER_US;
262 #endif
263
264         /* Ensure that only a single thread is accessing the mailbox at a
265          * certain time.
266          */
267         rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
268         if (rc != ECORE_SUCCESS)
269                 return rc;
270
271         /* Set drv command along with the updated sequence */
272         org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
273         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
274
275         do {
276                 /* Wait for MFW response */
277                 OSAL_UDELAY(delay);
278                 /* Give the FW up to 500 second (50*1000*10usec) */
279         } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
280                                                 MISCS_REG_GENERIC_POR_0)) &&
281                  (cnt++ < ECORE_MCP_RESET_RETRIES));
282
283         if (org_mcp_reset_seq !=
284             ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
285                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
286                            "MCP was reset after %d usec\n", cnt * delay);
287         } else {
288                 DP_ERR(p_hwfn, "Failed to reset MCP\n");
289                 rc = ECORE_AGAIN;
290         }
291
292         ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
293
294         return rc;
295 }
296
297 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
298                                              struct ecore_ptt *p_ptt,
299                                              u32 cmd, u32 param,
300                                              u32 *o_mcp_resp,
301                                              u32 *o_mcp_param)
302 {
303         u32 delay = CHIP_MCP_RESP_ITER_US;
304         u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
305         u32 seq, cnt = 1, actual_mb_seq;
306         enum _ecore_status_t rc = ECORE_SUCCESS;
307
308 #ifndef ASIC_ONLY
309         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
310                 delay = EMUL_MCP_RESP_ITER_US;
311         /* There is a built-in delay of 100usec in each MFW response read */
312         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
313                 max_retries /= 10;
314 #endif
315
316         /* Get actual driver mailbox sequence */
317         actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
318             DRV_MSG_SEQ_NUMBER_MASK;
319
320         /* Use MCP history register to check if MCP reset occurred between
321          * init time and now.
322          */
323         if (p_hwfn->mcp_info->mcp_hist !=
324             ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
325                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
326                 ecore_load_mcp_offsets(p_hwfn, p_ptt);
327                 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
328         }
329         seq = ++p_hwfn->mcp_info->drv_mb_seq;
330
331         /* Set drv param */
332         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
333
334         /* Set drv command along with the updated sequence */
335         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
336
337         do {
338                 /* Wait for MFW response */
339                 OSAL_UDELAY(delay);
340                 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
341
342                 /* Give the FW up to 5 second (500*10ms) */
343         } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
344                  (cnt++ < max_retries));
345
346         /* Is this a reply to our command? */
347         if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
348                 *o_mcp_resp &= FW_MSG_CODE_MASK;
349                 /* Get the MCP param */
350                 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
351         } else {
352                 /* FW BUG! */
353                 DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
354                        cmd, param);
355                 *o_mcp_resp = 0;
356                 rc = ECORE_AGAIN;
357                 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
358         }
359         return rc;
360 }
361
362 static enum _ecore_status_t
363 ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
364                         struct ecore_ptt *p_ptt,
365                         struct ecore_mcp_mb_params *p_mb_params)
366 {
367         u32 union_data_addr;
368         enum _ecore_status_t rc;
369
370         /* MCP not initialized */
371         if (!ecore_mcp_is_init(p_hwfn)) {
372                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
373                 return ECORE_BUSY;
374         }
375
376         union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
377                           OFFSETOF(struct public_drv_mb, union_data);
378
379         /* Ensure that only a single thread is accessing the mailbox at a
380          * certain time.
381          */
382         rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
383         if (rc != ECORE_SUCCESS)
384                 return rc;
385
386         if (p_mb_params->p_data_src != OSAL_NULL)
387                 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
388                                 p_mb_params->p_data_src,
389                                 sizeof(*p_mb_params->p_data_src));
390
391         rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
392                               p_mb_params->param, &p_mb_params->mcp_resp,
393                               &p_mb_params->mcp_param);
394
395         if (p_mb_params->p_data_dst != OSAL_NULL)
396                 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
397                                   union_data_addr,
398                                   sizeof(*p_mb_params->p_data_dst));
399
400         ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
401
402         return rc;
403 }
404
405 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
406                                    struct ecore_ptt *p_ptt, u32 cmd, u32 param,
407                                    u32 *o_mcp_resp, u32 *o_mcp_param)
408 {
409         struct ecore_mcp_mb_params mb_params;
410         enum _ecore_status_t rc;
411
412 #ifndef ASIC_ONLY
413         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
414                 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
415                         loaded--;
416                         loaded_port[p_hwfn->port_id]--;
417                         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
418                                    loaded);
419                 }
420                 return ECORE_SUCCESS;
421         }
422 #endif
423
424         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
425         mb_params.cmd = cmd;
426         mb_params.param = param;
427         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
428         if (rc != ECORE_SUCCESS)
429                 return rc;
430
431         *o_mcp_resp = mb_params.mcp_resp;
432         *o_mcp_param = mb_params.mcp_param;
433
434         return ECORE_SUCCESS;
435 }
436
437 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
438                                           struct ecore_ptt *p_ptt,
439                                           u32 cmd,
440                                           u32 param,
441                                           u32 *o_mcp_resp,
442                                           u32 *o_mcp_param,
443                                           u32 i_txn_size, u32 *i_buf)
444 {
445         struct ecore_mcp_mb_params mb_params;
446         union drv_union_data union_data;
447         enum _ecore_status_t rc;
448
449         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
450         mb_params.cmd = cmd;
451         mb_params.param = param;
452         OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
453         mb_params.p_data_src = &union_data;
454         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
455         if (rc != ECORE_SUCCESS)
456                 return rc;
457
458         *o_mcp_resp = mb_params.mcp_resp;
459         *o_mcp_param = mb_params.mcp_param;
460
461         return ECORE_SUCCESS;
462 }
463
464 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
465                                           struct ecore_ptt *p_ptt,
466                                           u32 cmd,
467                                           u32 param,
468                                           u32 *o_mcp_resp,
469                                           u32 *o_mcp_param,
470                                           u32 *o_txn_size, u32 *o_buf)
471 {
472         struct ecore_mcp_mb_params mb_params;
473         union drv_union_data union_data;
474         enum _ecore_status_t rc;
475
476         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
477         mb_params.cmd = cmd;
478         mb_params.param = param;
479         mb_params.p_data_dst = &union_data;
480         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
481         if (rc != ECORE_SUCCESS)
482                 return rc;
483
484         *o_mcp_resp = mb_params.mcp_resp;
485         *o_mcp_param = mb_params.mcp_param;
486
487         *o_txn_size = *o_mcp_param;
488         OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
489
490         return ECORE_SUCCESS;
491 }
492
493 #ifndef ASIC_ONLY
494 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
495                                     u32 *p_load_code)
496 {
497         static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
498
499         if (!loaded)
500                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
501         else if (!loaded_port[p_hwfn->port_id])
502                 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
503         else
504                 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
505
506         /* On CMT, always tell that it's engine */
507         if (p_hwfn->p_dev->num_hwfns > 1)
508                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
509
510         *p_load_code = load_phase;
511         loaded++;
512         loaded_port[p_hwfn->port_id]++;
513
514         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
515                    "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
516                    *p_load_code, loaded, p_hwfn->port_id,
517                    loaded_port[p_hwfn->port_id]);
518 }
519 #endif
520
521 static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
522 {
523         return (drv_role == DRV_ROLE_OS &&
524                 exist_drv_role == DRV_ROLE_PREBOOT) ||
525                (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
526 }
527
528 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
529                                                       struct ecore_ptt *p_ptt)
530 {
531         u32 resp = 0, param = 0;
532         enum _ecore_status_t rc;
533
534         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
535                            &resp, &param);
536         if (rc != ECORE_SUCCESS)
537                 DP_NOTICE(p_hwfn, false,
538                           "Failed to send cancel load request, rc = %d\n", rc);
539
540         return rc;
541 }
542
/* Bit positions of the config bitmap reported to the MFW as drv_ver_1
 * in LOAD_REQ - one bit per protocol/feature compiled into this build
 * (see ecore_get_config_bitmap()).
 */
543 #define CONFIG_ECORE_L2_BITMAP_IDX      (0x1 << 0)
544 #define CONFIG_ECORE_SRIOV_BITMAP_IDX   (0x1 << 1)
545 #define CONFIG_ECORE_ROCE_BITMAP_IDX    (0x1 << 2)
546 #define CONFIG_ECORE_IWARP_BITMAP_IDX   (0x1 << 3)
547 #define CONFIG_ECORE_FCOE_BITMAP_IDX    (0x1 << 4)
548 #define CONFIG_ECORE_ISCSI_BITMAP_IDX   (0x1 << 5)
549 #define CONFIG_ECORE_LL2_BITMAP_IDX     (0x1 << 6)
550
551 static u32 ecore_get_config_bitmap(void)
552 {
553         u32 config_bitmap = 0x0;
554
555 #ifdef CONFIG_ECORE_L2
556         config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
557 #endif
558 #ifdef CONFIG_ECORE_SRIOV
559         config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
560 #endif
561 #ifdef CONFIG_ECORE_ROCE
562         config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
563 #endif
564 #ifdef CONFIG_ECORE_IWARP
565         config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
566 #endif
567 #ifdef CONFIG_ECORE_FCOE
568         config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
569 #endif
570 #ifdef CONFIG_ECORE_ISCSI
571         config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
572 #endif
573 #ifdef CONFIG_ECORE_LL2
574         config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
575 #endif
576
577         return config_bitmap;
578 }
579
/* Input parameters for the LOAD_REQ handshake with the MFW. */
580 struct ecore_load_req_in_params {
581         u8 hsi_ver;             /* HSI version to advertise; 0 = current */
582 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT  0
583 #define ECORE_LOAD_REQ_HSI_VER_1        1
584         u32 drv_ver_0;          /* driver version word (ECORE_VERSION) */
585         u32 drv_ver_1;          /* config bitmap (ecore_get_config_bitmap) */
586         u32 fw_ver;             /* storm FW version */
587         u8 drv_role;            /* DRV_ROLE_* value */
588         u8 timeout_val;         /* load-lock timeout passed to the MFW */
589         u8 force_cmd;           /* LOAD_REQ_FORCE_* value */
590         bool avoid_eng_reset;   /* request the MFW to avoid engine reset */
591 };
592
/* Values returned by the MFW in the LOAD_REQ response. */
593 struct ecore_load_req_out_params {
594         u32 load_code;          /* FW_MSG_CODE_* load response code */
595         u32 exist_drv_ver_0;    /* version words of the existing driver */
596         u32 exist_drv_ver_1;
597         u32 exist_fw_ver;       /* FW version of the existing driver */
598         u8 exist_drv_role;      /* DRV_ROLE_* of the existing driver */
599         u8 mfw_hsi_ver;         /* HSI version reported by the MFW */
600         bool drv_exists;        /* set when another driver is present */
601 };
602
/* Perform a single LOAD_REQ mailbox exchange with the MFW.
 *
 * Builds the load_req union payload from p_in_params, sends
 * DRV_MSG_CODE_LOAD_REQ and fills p_out_params from the response.  When
 * the legacy HSI (version 1) is in use, or the MFW refused the request
 * with FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1, the response payload is not
 * parsed and only load_code is valid.
 *
 * Returns the mailbox status; ECORE_SUCCESS even when the MFW refused
 * the load (the refusal is conveyed via p_out_params->load_code).
 */
603 static enum _ecore_status_t
604 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
605                      struct ecore_load_req_in_params *p_in_params,
606                      struct ecore_load_req_out_params *p_out_params)
607 {
608         union drv_union_data union_data_src, union_data_dst;
609         struct ecore_mcp_mb_params mb_params;
610         struct load_req_stc *p_load_req;
611         struct load_rsp_stc *p_load_rsp;
612         u32 hsi_ver;
613         enum _ecore_status_t rc;
614
        /* Stage the request payload in the source union */
615         p_load_req = &union_data_src.load_req;
616         OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
617         p_load_req->drv_ver_0 = p_in_params->drv_ver_0;
618         p_load_req->drv_ver_1 = p_in_params->drv_ver_1;
619         p_load_req->fw_ver = p_in_params->fw_ver;
620         ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_ROLE,
621                             p_in_params->drv_role);
622         ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_LOCK_TO,
623                             p_in_params->timeout_val);
624         ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FORCE,
625                             p_in_params->force_cmd);
626         ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FLAGS0,
627                             p_in_params->avoid_eng_reset);
628
        /* hsi_ver 0 means "advertise the current HSI version" */
629         hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
630                   DRV_ID_MCP_HSI_VER_CURRENT :
631                   (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
632
633         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
634         mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
635         mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
636         mb_params.p_data_src = &union_data_src;
637         mb_params.p_data_dst = &union_data_dst;
638
639         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
640                    "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
641                    mb_params.param,
642                    ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
643                    ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
644                    ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
645                    ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
646
        /* The payload fields above are only defined for the new HSI */
647         if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
648                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
649                            "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
650                            p_load_req->drv_ver_0, p_load_req->drv_ver_1,
651                            p_load_req->fw_ver, p_load_req->misc0,
652                            ECORE_MFW_GET_FIELD(p_load_req->misc0,
653                                                LOAD_REQ_ROLE),
654                            ECORE_MFW_GET_FIELD(p_load_req->misc0,
655                                                LOAD_REQ_LOCK_TO),
656                            ECORE_MFW_GET_FIELD(p_load_req->misc0,
657                                                LOAD_REQ_FORCE),
658                            ECORE_MFW_GET_FIELD(p_load_req->misc0,
659                                                LOAD_REQ_FLAGS0));
660
661         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
662         if (rc != ECORE_SUCCESS) {
663                 DP_NOTICE(p_hwfn, false,
664                           "Failed to send load request, rc = %d\n", rc);
665                 return rc;
666         }
667
668         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
669                    "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
670         p_out_params->load_code = mb_params.mcp_resp;
671
        /* Parse the response payload only for the new HSI, and only when
         * the request wasn't refused over the HSI version.
         */
672         if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
673             p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
674                 p_load_rsp = &union_data_dst.load_rsp;
675                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
676                            "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
677                            p_load_rsp->drv_ver_0, p_load_rsp->drv_ver_1,
678                            p_load_rsp->fw_ver, p_load_rsp->misc0,
679                            ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
680                                                LOAD_RSP_ROLE),
681                            ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
682                                                LOAD_RSP_HSI),
683                            ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
684                                                LOAD_RSP_FLAGS0));
685
686                 p_out_params->exist_drv_ver_0 = p_load_rsp->drv_ver_0;
687                 p_out_params->exist_drv_ver_1 = p_load_rsp->drv_ver_1;
688                 p_out_params->exist_fw_ver = p_load_rsp->fw_ver;
689                 p_out_params->exist_drv_role =
690                         ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_ROLE);
691                 p_out_params->mfw_hsi_ver =
692                         ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_HSI);
693                 p_out_params->drv_exists =
694                         ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
695                                             LOAD_RSP_FLAGS0) &
696                         LOAD_RSP_FLAGS0_DRV_EXISTS;
697         }
698
699         return ECORE_SUCCESS;
700 }
701
702 static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
703                                                    enum ecore_drv_role drv_role,
704                                                    u8 *p_mfw_drv_role)
705 {
706         switch (drv_role) {
707         case ECORE_DRV_ROLE_OS:
708                 *p_mfw_drv_role = DRV_ROLE_OS;
709                 break;
710         case ECORE_DRV_ROLE_KDUMP:
711                 *p_mfw_drv_role = DRV_ROLE_KDUMP;
712                 break;
713         default:
714                 DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
715                 return ECORE_INVAL;
716         }
717
718         return ECORE_SUCCESS;
719 }
720
/* Force behavior requested in LOAD_REQ when another driver already
 * exists; translated to LOAD_REQ_FORCE_* by ecore_get_mfw_force_cmd().
 */
721 enum ecore_load_req_force {
722         ECORE_LOAD_REQ_FORCE_NONE,
723         ECORE_LOAD_REQ_FORCE_PF,
724         ECORE_LOAD_REQ_FORCE_ALL,
725 };
726
727 static enum _ecore_status_t
728 ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
729                         enum ecore_load_req_force force_cmd,
730                         u8 *p_mfw_force_cmd)
731 {
732         switch (force_cmd) {
733         case ECORE_LOAD_REQ_FORCE_NONE:
734                 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
735                 break;
736         case ECORE_LOAD_REQ_FORCE_PF:
737                 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
738                 break;
739         case ECORE_LOAD_REQ_FORCE_ALL:
740                 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
741                 break;
742         default:
743                 DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
744                 return ECORE_INVAL;
745         }
746
747         return ECORE_SUCCESS;
748 }
749
/* Negotiate load permission with the MFW.
 *
 * Sends a LOAD_REQ and, depending on the response, may resend it with the
 * legacy HSI (version 1) or as a force-load request before classifying the
 * final response code.
 *
 * @param p_hwfn - HW function context
 * @param p_ptt - PTT window for register access
 * @param p_params - in: drv_role/timeout_val/avoid_eng_reset;
 *                   out: load_code (the MFW's final response)
 *
 * @return ECORE_SUCCESS when a load code was obtained, ECORE_BUSY when the
 *         MFW refused the load, ECORE_INVAL on unexpected state.
 */
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	/* Emulation has no MFW; derive the load code locally instead */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	/* Build the request: newest HSI, driver/FW versions, and the
	 * caller's role translated to the MFW encoding.
	 */
	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	/* NOTE(review): "eocre" looks like a typo of "ecore", but the helper
	 * is presumably defined with this exact spelling elsewhere in this
	 * file - renaming must be done at both sites in one change.
	 */
	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	/* The initial request never forces out an existing driver */
	rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
				     &mfw_force_cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		/* Force out the existing driver only when its role permits */
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role)) {
			DP_INFO(p_hwfn,
				"A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			rc = ecore_get_mfw_force_cmd(p_hwfn,
						     ECORE_LOAD_REQ_FORCE_ALL,
						     &mfw_force_cmd);
			if (rc != ECORE_SUCCESS)
				return rc;

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);

			/* Tell the MFW we are backing out of the request */
			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
		DP_NOTICE(p_hwfn, false,
			  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		break;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}
885
886 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
887                                     struct ecore_ptt *p_ptt)
888 {
889         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
890                                         PUBLIC_PATH);
891         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
892         u32 path_addr = SECTION_ADDR(mfw_path_offsize,
893                                      ECORE_PATH_ID(p_hwfn));
894         u32 disabled_vfs[VF_MAX_STATIC / 32];
895         int i;
896
897         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
898                    "Reading Disabled VF information from [offset %08x],"
899                    " path_addr %08x\n",
900                    mfw_path_offsize, path_addr);
901
902         for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
903                 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
904                                            path_addr +
905                                            OFFSETOF(struct public_path,
906                                                     mcp_vf_disabled) +
907                                            sizeof(u32) * i);
908                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
909                            "FLR-ed VFs [%08x,...,%08x] - %08x\n",
910                            i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
911         }
912
913         if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
914                 OSAL_VF_FLR_UPDATE(p_hwfn);
915 }
916
917 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
918                                           struct ecore_ptt *p_ptt,
919                                           u32 *vfs_to_ack)
920 {
921         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
922                                         PUBLIC_FUNC);
923         u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
924         u32 func_addr = SECTION_ADDR(mfw_func_offsize,
925                                      MCP_PF_ID(p_hwfn));
926         struct ecore_mcp_mb_params mb_params;
927         union drv_union_data union_data;
928         enum _ecore_status_t rc;
929         int i;
930
931         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
932                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
933                            "Acking VFs [%08x,...,%08x] - %08x\n",
934                            i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
935
936         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
937         mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
938         OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
939         mb_params.p_data_src = &union_data;
940         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
941                                      &mb_params);
942         if (rc != ECORE_SUCCESS) {
943                 DP_NOTICE(p_hwfn, false,
944                           "Failed to pass ACK for VF flr to MFW\n");
945                 return ECORE_TIMEOUT;
946         }
947
948         /* TMP - clear the ACK bits; should be done by MFW */
949         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
950                 ecore_wr(p_hwfn, p_ptt,
951                          func_addr +
952                          OFFSETOF(struct public_func, drv_ack_vf_disabled) +
953                          i * sizeof(u32), 0);
954
955         return rc;
956 }
957
958 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
959                                                 struct ecore_ptt *p_ptt)
960 {
961         u32 transceiver_state;
962
963         transceiver_state = ecore_rd(p_hwfn, p_ptt,
964                                      p_hwfn->mcp_info->port_addr +
965                                      OFFSETOF(struct public_port,
966                                               transceiver_data));
967
968         DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
969                    "Received transceiver state update [0x%08x] from mfw"
970                    " [Addr 0x%x]\n",
971                    transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
972                                             OFFSETOF(struct public_port,
973                                                      transceiver_data)));
974
975         transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
976
977         if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
978                 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
979         else
980                 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
981 }
982
/* Process a link-state notification from the MFW.
 *
 * Reads the link_status word from the port section of the shmem, decodes
 * it into the cached link_output state (speed, duplex, partner abilities,
 * flow control), re-applies the PF min/max bandwidth configuration, and
 * notifies the upper layer. When b_reset is set, only clears the cached
 * state and returns.
 */
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* The cached state is rebuilt from scratch on every notification */
	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					  OFFSETOF(struct public_port,
						   link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	/* Report link-up only once the driver has configured the link */
	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	/* Decode the negotiated speed/duplex; speed is in Mb/s */
	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store total line speed as p_link->speed is
	 * again changes according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Mintz bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
					      p_link->min_pf_rate);

	/* Decode autoneg status and partner-advertised abilities */
	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					 LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	/* Decode the partner's pause advertisement */
	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	/* Let the upper layer react to the new link state */
	OSAL_LINK_UPDATE(p_hwfn);
}
1119
1120 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1121                                         struct ecore_ptt *p_ptt, bool b_up)
1122 {
1123         struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1124         struct ecore_mcp_mb_params mb_params;
1125         union drv_union_data union_data;
1126         struct eth_phy_cfg *p_phy_cfg;
1127         enum _ecore_status_t rc = ECORE_SUCCESS;
1128         u32 cmd;
1129
1130 #ifndef ASIC_ONLY
1131         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1132                 return ECORE_SUCCESS;
1133 #endif
1134
1135         /* Set the shmem configuration according to params */
1136         p_phy_cfg = &union_data.drv_phy_cfg;
1137         OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
1138         cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1139         if (!params->speed.autoneg)
1140                 p_phy_cfg->speed = params->speed.forced_speed;
1141         p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1142         p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1143         p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1144         p_phy_cfg->adv_speed = params->speed.advertised_speeds;
1145         p_phy_cfg->loopback_mode = params->loopback_mode;
1146         p_hwfn->b_drv_link_init = b_up;
1147
1148         if (b_up)
1149                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1150                            "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
1151                            " adv_speed 0x%08x, loopback 0x%08x\n",
1152                            p_phy_cfg->speed, p_phy_cfg->pause,
1153                            p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode);
1154         else
1155                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1156
1157         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1158         mb_params.cmd = cmd;
1159         mb_params.p_data_src = &union_data;
1160         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1161
1162         /* if mcp fails to respond we must abort */
1163         if (rc != ECORE_SUCCESS) {
1164                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1165                 return rc;
1166         }
1167
1168         /* Reset the link status if needed */
1169         if (!b_up)
1170                 ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
1171
1172         return rc;
1173 }
1174
1175 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1176                                    struct ecore_ptt *p_ptt)
1177 {
1178         u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1179
1180         /* TODO - Add support for VFs */
1181         if (IS_VF(p_hwfn->p_dev))
1182                 return ECORE_INVAL;
1183
1184         path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1185                                                  PUBLIC_PATH);
1186         path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1187         path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1188
1189         proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1190                                  path_addr +
1191                                  OFFSETOF(struct public_path, process_kill)) &
1192             PROCESS_KILL_COUNTER_MASK;
1193
1194         return proc_kill_cnt;
1195 }
1196
1197 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1198                                           struct ecore_ptt *p_ptt)
1199 {
1200         struct ecore_dev *p_dev = p_hwfn->p_dev;
1201         u32 proc_kill_cnt;
1202
1203         /* Prevent possible attentions/interrupts during the recovery handling
1204          * and till its load phase, during which they will be re-enabled.
1205          */
1206         ecore_int_igu_disable_int(p_hwfn, p_ptt);
1207
1208         DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1209
1210         /* The following operations should be done once, and thus in CMT mode
1211          * are carried out by only the first HW function.
1212          */
1213         if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1214                 return;
1215
1216         if (p_dev->recov_in_prog) {
1217                 DP_NOTICE(p_hwfn, false,
1218                           "Ignoring the indication since a recovery"
1219                           " process is already in progress\n");
1220                 return;
1221         }
1222
1223         p_dev->recov_in_prog = true;
1224
1225         proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1226         DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1227
1228         OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1229 }
1230
1231 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1232                                           struct ecore_ptt *p_ptt,
1233                                           enum MFW_DRV_MSG_TYPE type)
1234 {
1235         enum ecore_mcp_protocol_type stats_type;
1236         union ecore_mcp_protocol_stats stats;
1237         struct ecore_mcp_mb_params mb_params;
1238         union drv_union_data union_data;
1239         u32 hsi_param;
1240
1241         switch (type) {
1242         case MFW_DRV_MSG_GET_LAN_STATS:
1243                 stats_type = ECORE_MCP_LAN_STATS;
1244                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1245                 break;
1246         default:
1247                 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
1248                 return;
1249         }
1250
1251         OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1252
1253         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1254         mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1255         mb_params.param = hsi_param;
1256         OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
1257         mb_params.p_data_src = &union_data;
1258         ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1259 }
1260
1261 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1262                                     struct public_func *p_shmem_info)
1263 {
1264         struct ecore_mcp_function_info *p_info;
1265
1266         p_info = &p_hwfn->mcp_info->func_info;
1267
1268         /* TODO - bandwidth min/max should have valid values of 1-100,
1269          * as well as some indication that the feature is disabled.
1270          * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
1271          * limit and correct value to min `1' and max `100' if limit isn't in
1272          * range.
1273          */
1274         p_info->bandwidth_min = (p_shmem_info->config &
1275                                  FUNC_MF_CFG_MIN_BW_MASK) >>
1276             FUNC_MF_CFG_MIN_BW_SHIFT;
1277         if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1278                 DP_INFO(p_hwfn,
1279                         "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1280                         p_info->bandwidth_min);
1281                 p_info->bandwidth_min = 1;
1282         }
1283
1284         p_info->bandwidth_max = (p_shmem_info->config &
1285                                  FUNC_MF_CFG_MAX_BW_MASK) >>
1286             FUNC_MF_CFG_MAX_BW_SHIFT;
1287         if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1288                 DP_INFO(p_hwfn,
1289                         "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1290                         p_info->bandwidth_max);
1291                 p_info->bandwidth_max = 100;
1292         }
1293 }
1294
1295 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1296                                     struct ecore_ptt *p_ptt,
1297                                     struct public_func *p_data,
1298                                     int pfid)
1299 {
1300         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1301                                         PUBLIC_FUNC);
1302         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1303         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1304         u32 i, size;
1305
1306         OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1307
1308         size = OSAL_MIN_T(u32, sizeof(*p_data),
1309                           SECTION_SIZE(mfw_path_offsize));
1310         for (i = 0; i < size / sizeof(u32); i++)
1311                 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1312                                               func_addr + (i << 2));
1313
1314         return size;
1315 }
1316
1317 static void
1318 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1319 {
1320         struct ecore_mcp_function_info *p_info;
1321         struct public_func shmem_info;
1322         u32 resp = 0, param = 0;
1323
1324         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1325
1326         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1327
1328         p_info = &p_hwfn->mcp_info->func_info;
1329
1330         ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1331
1332         ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1333
1334         /* Acknowledge the MFW */
1335         ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1336                       &param);
1337 }
1338
1339 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1340                                          struct ecore_ptt *p_ptt)
1341 {
1342         /* A single notification should be sent to upper driver in CMT mode */
1343         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1344                 return;
1345
1346         DP_NOTICE(p_hwfn, false,
1347                   "Fan failure was detected on the network interface card"
1348                   " and it's going to be shut down.\n");
1349
1350         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1351 }
1352
1353 static enum _ecore_status_t
1354 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1355                     u32 mdump_cmd, union drv_union_data *p_data_src,
1356                     union drv_union_data *p_data_dst, u32 *p_mcp_resp)
1357 {
1358         struct ecore_mcp_mb_params mb_params;
1359         enum _ecore_status_t rc;
1360
1361         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1362         mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1363         mb_params.param = mdump_cmd;
1364         mb_params.p_data_src = p_data_src;
1365         mb_params.p_data_dst = p_data_dst;
1366         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1367         if (rc != ECORE_SUCCESS)
1368                 return rc;
1369
1370         *p_mcp_resp = mb_params.mcp_resp;
1371         if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1372                 DP_NOTICE(p_hwfn, false,
1373                           "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
1374                           mdump_cmd);
1375                 rc = ECORE_INVAL;
1376         }
1377
1378         return rc;
1379 }
1380
1381 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1382                                                 struct ecore_ptt *p_ptt)
1383 {
1384         u32 mcp_resp;
1385
1386         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
1387                                    OSAL_NULL, OSAL_NULL, &mcp_resp);
1388 }
1389
1390 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1391                                                 struct ecore_ptt *p_ptt,
1392                                                 u32 epoch)
1393 {
1394         union drv_union_data union_data;
1395         u32 mcp_resp;
1396
1397         OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
1398
1399         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
1400                                    &union_data, OSAL_NULL, &mcp_resp);
1401 }
1402
1403 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1404                                              struct ecore_ptt *p_ptt)
1405 {
1406         u32 mcp_resp;
1407
1408         p_hwfn->p_dev->mdump_en = true;
1409
1410         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
1411                                    OSAL_NULL, OSAL_NULL, &mcp_resp);
1412 }
1413
1414 static enum _ecore_status_t
1415 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1416                            struct mdump_config_stc *p_mdump_config)
1417 {
1418         union drv_union_data union_data;
1419         u32 mcp_resp;
1420         enum _ecore_status_t rc;
1421
1422         rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
1423                                  OSAL_NULL, &union_data, &mcp_resp);
1424         if (rc != ECORE_SUCCESS)
1425                 return rc;
1426
1427         if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
1428                 return ECORE_NOTIMPL;
1429
1430         if (mcp_resp != FW_MSG_CODE_OK) {
1431                 DP_NOTICE(p_hwfn, false,
1432                           "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1433                           mcp_resp);
1434                 rc = ECORE_UNKNOWN_ERROR;
1435         }
1436
1437         OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
1438                     sizeof(*p_mdump_config));
1439
1440         return rc;
1441 }
1442
1443 enum _ecore_status_t
1444 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1445                          struct ecore_mdump_info *p_mdump_info)
1446 {
1447         u32 addr, global_offsize, global_addr;
1448         struct mdump_config_stc mdump_config;
1449         enum _ecore_status_t rc;
1450
1451         OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1452
1453         addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1454                                     PUBLIC_GLOBAL);
1455         global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1456         global_addr = SECTION_ADDR(global_offsize, 0);
1457         p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1458                                         global_addr +
1459                                         OFFSETOF(struct public_global,
1460                                                  mdump_reason));
1461
1462         if (p_mdump_info->reason) {
1463                 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1464                 if (rc != ECORE_SUCCESS)
1465                         return rc;
1466
1467                 p_mdump_info->version = mdump_config.version;
1468                 p_mdump_info->config = mdump_config.config;
1469                 p_mdump_info->epoch = mdump_config.epoc;
1470                 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1471                 p_mdump_info->valid_logs = mdump_config.valid_logs;
1472
1473                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1474                            "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1475                            p_mdump_info->reason, p_mdump_info->version,
1476                            p_mdump_info->config, p_mdump_info->epoch,
1477                            p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1478         } else {
1479                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1480                            "MFW mdump info: reason %d\n", p_mdump_info->reason);
1481         }
1482
1483         return ECORE_SUCCESS;
1484 }
1485
1486 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1487                                                 struct ecore_ptt *p_ptt)
1488 {
1489         u32 mcp_resp;
1490
1491         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
1492                                    OSAL_NULL, OSAL_NULL, &mcp_resp);
1493 }
1494
/* Handle a critical-error notification from the MFW.
 *
 * If mdump is enabled, the error is intentionally left unacknowledged so
 * the MFW can collect a crash dump, and mdump is disabled for subsequent
 * notifications; otherwise the error is ACKed and the upper driver is
 * notified via ecore_hw_err_notify().
 */
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
                                            struct ecore_ptt *p_ptt)
{
	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Received a critical error notification from the MFW!\n");

	if (p_hwfn->p_dev->mdump_en) {
		/* Skip the ACK so the MFW proceeds with the crash dump */
		DP_NOTICE(p_hwfn, false,
			  "Not acknowledging the notification to allow the MFW crash dump\n");
		p_hwfn->p_dev->mdump_en = false;
		return;
	}

	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
1517
/* Process a "message from MFW" indication.
 *
 * Reads the MFW mailbox into mcp_info->mfw_mb_cur, compares it against the
 * shadow copy from the previous read, and dispatches a handler for every
 * command dword that changed.  The whole mailbox is then acknowledged back
 * to the MFW (in big-endian, as the MFW expects) and the shadow copy is
 * refreshed.
 *
 * Returns ECORE_SUCCESS, or ECORE_INVAL when an unimplemented message was
 * received or when no new message was found despite the indication.
 */
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		/* The mailbox index identifies the message type */
		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		/* The ACK dwords sit after the length dword and the message
		 * dwords within the mailbox area.
		 */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
1611
1612 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1613                                            struct ecore_ptt *p_ptt,
1614                                            u32 *p_mfw_ver,
1615                                            u32 *p_running_bundle_id)
1616 {
1617         u32 global_offsize;
1618
1619 #ifndef ASIC_ONLY
1620         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1621                 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1622                 return ECORE_SUCCESS;
1623         }
1624 #endif
1625
1626         if (IS_VF(p_hwfn->p_dev)) {
1627                 if (p_hwfn->vf_iov_info) {
1628                         struct pfvf_acquire_resp_tlv *p_resp;
1629
1630                         p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1631                         *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1632                         return ECORE_SUCCESS;
1633                 } else {
1634                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1635                                    "VF requested MFW version prior to ACQUIRE\n");
1636                         return ECORE_INVAL;
1637                 }
1638         }
1639
1640         global_offsize = ecore_rd(p_hwfn, p_ptt,
1641                                   SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1642                                                        public_base,
1643                                                        PUBLIC_GLOBAL));
1644         *p_mfw_ver =
1645             ecore_rd(p_hwfn, p_ptt,
1646                      SECTION_ADDR(global_offsize,
1647                                   0) + OFFSETOF(struct public_global, mfw_ver));
1648
1649         if (p_running_bundle_id != OSAL_NULL) {
1650                 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1651                                                 SECTION_ADDR(global_offsize,
1652                                                              0) +
1653                                                 OFFSETOF(struct public_global,
1654                                                          running_bundle_id));
1655         }
1656
1657         return ECORE_SUCCESS;
1658 }
1659
1660 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1661                                               u32 *p_media_type)
1662 {
1663         struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1664         struct ecore_ptt *p_ptt;
1665
1666         /* TODO - Add support for VFs */
1667         if (IS_VF(p_dev))
1668                 return ECORE_INVAL;
1669
1670         if (!ecore_mcp_is_init(p_hwfn)) {
1671                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
1672                 return ECORE_BUSY;
1673         }
1674
1675         *p_media_type = MEDIA_UNSPECIFIED;
1676
1677         p_ptt = ecore_ptt_acquire(p_hwfn);
1678         if (!p_ptt)
1679                 return ECORE_BUSY;
1680
1681         *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1682                                  OFFSETOF(struct public_port, media_type));
1683
1684         ecore_ptt_release(p_hwfn, p_ptt);
1685
1686         return ECORE_SUCCESS;
1687 }
1688
1689 /* @DPDK */
1690 /* Old MFW has a global configuration for all PFs regarding RDMA support */
/* Report the personality of a PF under a legacy (global-config) MFW.
 * Such MFWs expose no per-PF protocol info, so L2 is assumed.
 */
static void
ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
                                 enum ecore_pci_personality *p_proto)
{
	*p_proto = ECORE_PCI_ETH;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}
1701
1702 /* @DPDK */
1703 static enum _ecore_status_t
1704 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
1705                               struct ecore_ptt *p_ptt,
1706                               enum ecore_pci_personality *p_proto)
1707 {
1708         u32 resp = 0, param = 0;
1709         enum _ecore_status_t rc;
1710
1711         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1712                    "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
1713                    (u32)*p_proto, resp, param);
1714         return ECORE_SUCCESS;
1715 }
1716
1717 static enum _ecore_status_t
1718 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1719                           struct public_func *p_info,
1720                           struct ecore_ptt *p_ptt,
1721                           enum ecore_pci_personality *p_proto)
1722 {
1723         enum _ecore_status_t rc = ECORE_SUCCESS;
1724
1725         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1726         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1727                 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
1728                     ECORE_SUCCESS)
1729                         ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
1730                 break;
1731         default:
1732                 rc = ECORE_INVAL;
1733         }
1734
1735         return rc;
1736 }
1737
1738 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
1739                                                     struct ecore_ptt *p_ptt)
1740 {
1741         struct ecore_mcp_function_info *info;
1742         struct public_func shmem_info;
1743
1744         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1745         info = &p_hwfn->mcp_info->func_info;
1746
1747         info->pause_on_host = (shmem_info.config &
1748                                FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1749
1750         if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1751                                       &info->protocol)) {
1752                 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1753                        (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1754                 return ECORE_INVAL;
1755         }
1756
1757         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1758
1759         if (shmem_info.mac_upper || shmem_info.mac_lower) {
1760                 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1761                 info->mac[1] = (u8)(shmem_info.mac_upper);
1762                 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1763                 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1764                 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1765                 info->mac[5] = (u8)(shmem_info.mac_lower);
1766         } else {
1767                 /* TODO - are there protocols for which there's no MAC? */
1768                 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
1769         }
1770
1771         /* TODO - are these calculations true for BE machine? */
1772         info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
1773                          (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
1774         info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
1775                          (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
1776
1777         info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1778
1779         info->mtu = (u16)shmem_info.mtu_size;
1780
1781         if (info->mtu == 0)
1782                 info->mtu = 1500;
1783
1784         info->mtu = (u16)shmem_info.mtu_size;
1785
1786         DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
1787                    "Read configuration from shmem: pause_on_host %02x"
1788                     " protocol %02x BW [%02x - %02x]"
1789                     " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
1790                     " node %lx ovlan %04x\n",
1791                    info->pause_on_host, info->protocol,
1792                    info->bandwidth_min, info->bandwidth_max,
1793                    info->mac[0], info->mac[1], info->mac[2],
1794                    info->mac[3], info->mac[4], info->mac[5],
1795                    (unsigned long)info->wwn_port,
1796                    (unsigned long)info->wwn_node, info->ovlan);
1797
1798         return ECORE_SUCCESS;
1799 }
1800
1801 struct ecore_mcp_link_params
1802 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1803 {
1804         if (!p_hwfn || !p_hwfn->mcp_info)
1805                 return OSAL_NULL;
1806         return &p_hwfn->mcp_info->link_input;
1807 }
1808
1809 struct ecore_mcp_link_state
1810 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1811 {
1812         if (!p_hwfn || !p_hwfn->mcp_info)
1813                 return OSAL_NULL;
1814
1815 #ifndef ASIC_ONLY
1816         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1817                 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1818                 p_hwfn->mcp_info->link_output.link_up = true;
1819         }
1820 #endif
1821
1822         return &p_hwfn->mcp_info->link_output;
1823 }
1824
1825 struct ecore_mcp_link_capabilities
1826 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1827 {
1828         if (!p_hwfn || !p_hwfn->mcp_info)
1829                 return OSAL_NULL;
1830         return &p_hwfn->mcp_info->link_capabilities;
1831 }
1832
1833 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1834                                      struct ecore_ptt *p_ptt)
1835 {
1836         u32 resp = 0, param = 0;
1837         enum _ecore_status_t rc;
1838
1839         rc = ecore_mcp_cmd(p_hwfn, p_ptt,
1840                            DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
1841
1842         /* Wait for the drain to complete before returning */
1843         OSAL_MSLEEP(1020);
1844
1845         return rc;
1846 }
1847
1848 const struct ecore_mcp_function_info
1849 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1850 {
1851         if (!p_hwfn || !p_hwfn->mcp_info)
1852                 return OSAL_NULL;
1853         return &p_hwfn->mcp_info->func_info;
1854 }
1855
1856 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1857                                            struct ecore_ptt *p_ptt,
1858                                            struct ecore_mcp_nvm_params *params)
1859 {
1860         enum _ecore_status_t rc;
1861
1862         switch (params->type) {
1863         case ECORE_MCP_NVM_RD:
1864                 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1865                                           params->nvm_common.offset,
1866                                           &params->nvm_common.resp,
1867                                           &params->nvm_common.param,
1868                                           params->nvm_rd.buf_size,
1869                                           params->nvm_rd.buf);
1870                 break;
1871         case ECORE_MCP_CMD:
1872                 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1873                                    params->nvm_common.offset,
1874                                    &params->nvm_common.resp,
1875                                    &params->nvm_common.param);
1876                 break;
1877         case ECORE_MCP_NVM_WR:
1878                 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1879                                           params->nvm_common.offset,
1880                                           &params->nvm_common.resp,
1881                                           &params->nvm_common.param,
1882                                           params->nvm_wr.buf_size,
1883                                           params->nvm_wr.buf);
1884                 break;
1885         default:
1886                 rc = ECORE_NOTIMPL;
1887                 break;
1888         }
1889         return rc;
1890 }
1891
1892 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1893                                   struct ecore_ptt *p_ptt, u32 personalities)
1894 {
1895         enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1896         struct public_func shmem_info;
1897         int i, count = 0, num_pfs;
1898
1899         num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1900
1901         for (i = 0; i < num_pfs; i++) {
1902                 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1903                                          MCP_PF_ID_BY_REL(p_hwfn, i));
1904                 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1905                         continue;
1906
1907                 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1908                                               &protocol) !=
1909                     ECORE_SUCCESS)
1910                         continue;
1911
1912                 if ((1 << ((u32)protocol)) & personalities)
1913                         count++;
1914         }
1915
1916         return count;
1917 }
1918
1919 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1920                                               struct ecore_ptt *p_ptt,
1921                                               u32 *p_flash_size)
1922 {
1923         u32 flash_size;
1924
1925 #ifndef ASIC_ONLY
1926         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1927                 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
1928                 return ECORE_INVAL;
1929         }
1930 #endif
1931
1932         if (IS_VF(p_hwfn->p_dev))
1933                 return ECORE_INVAL;
1934
1935         flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1936         flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1937             MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1938         flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1939
1940         *p_flash_size = flash_size;
1941
1942         return ECORE_SUCCESS;
1943 }
1944
1945 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1946                                                   struct ecore_ptt *p_ptt)
1947 {
1948         struct ecore_dev *p_dev = p_hwfn->p_dev;
1949
1950         if (p_dev->recov_in_prog) {
1951                 DP_NOTICE(p_hwfn, false,
1952                           "Avoid triggering a recovery since such a process"
1953                           " is already in progress\n");
1954                 return ECORE_AGAIN;
1955         }
1956
1957         DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
1958         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1959
1960         return ECORE_SUCCESS;
1961 }
1962
1963 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1964                                               struct ecore_ptt *p_ptt,
1965                                               u8 vf_id, u8 num)
1966 {
1967         u32 resp = 0, param = 0, rc_param = 0;
1968         enum _ecore_status_t rc;
1969
1970 /* Only Leader can configure MSIX, and need to take CMT into account */
1971
1972         if (!IS_LEAD_HWFN(p_hwfn))
1973                 return ECORE_SUCCESS;
1974         num *= p_hwfn->p_dev->num_hwfns;
1975
1976         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1977             DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1978         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1979             DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1980
1981         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1982                            &resp, &rc_param);
1983
1984         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1985                 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
1986                           vf_id);
1987                 rc = ECORE_INVAL;
1988         } else {
1989                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1990                            "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
1991                             num, vf_id);
1992         }
1993
1994         return rc;
1995 }
1996
/* Send the driver version to the MFW (DRV_MSG_CODE_SET_VERSION).
 *
 * The numeric version is copied as-is; the name string is copied one u32 at
 * a time converted to big-endian, which is the layout the MFW stores it in.
 *
 * NOTE(review): the copy loop assumes p_ver->name holds at least
 * MCP_DRV_VER_STR_SIZE - 4 readable bytes and is u32-accessible - confirm
 * with callers.
 */
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                           struct ecore_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 num_words, i;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	/* No MFW to talk to on slow (emulation/FPGA) platforms */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		/* The driver name is expected to be in a big-endian format */
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
2033
2034 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2035                                     struct ecore_ptt *p_ptt)
2036 {
2037         enum _ecore_status_t rc;
2038         u32 resp = 0, param = 0;
2039
2040         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2041                            &param);
2042         if (rc != ECORE_SUCCESS)
2043                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2044
2045         return rc;
2046 }
2047
2048 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2049                                       struct ecore_ptt *p_ptt)
2050 {
2051         u32 value, cpu_mode;
2052
2053         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2054
2055         value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2056         value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2057         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2058         cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2059
2060         return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
2061 }
2062
2063 enum _ecore_status_t
2064 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2065                                    struct ecore_ptt *p_ptt,
2066                                    enum ecore_ov_client client)
2067 {
2068         enum _ecore_status_t rc;
2069         u32 resp = 0, param = 0;
2070         u32 drv_mb_param;
2071
2072         switch (client) {
2073         case ECORE_OV_CLIENT_DRV:
2074                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2075                 break;
2076         case ECORE_OV_CLIENT_USER:
2077                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2078                 break;
2079         case ECORE_OV_CLIENT_VENDOR_SPEC:
2080                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2081                 break;
2082         default:
2083                 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2084                 return ECORE_INVAL;
2085         }
2086
2087         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2088                            drv_mb_param, &resp, &param);
2089         if (rc != ECORE_SUCCESS)
2090                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2091
2092         return rc;
2093 }
2094
2095 enum _ecore_status_t
2096 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2097                                  struct ecore_ptt *p_ptt,
2098                                  enum ecore_ov_driver_state drv_state)
2099 {
2100         enum _ecore_status_t rc;
2101         u32 resp = 0, param = 0;
2102         u32 drv_mb_param;
2103
2104         switch (drv_state) {
2105         case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2106                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2107                 break;
2108         case ECORE_OV_DRIVER_STATE_DISABLED:
2109                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2110                 break;
2111         case ECORE_OV_DRIVER_STATE_ACTIVE:
2112                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2113                 break;
2114         default:
2115                 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2116                 return ECORE_INVAL;
2117         }
2118
2119         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2120                            drv_mb_param, &resp, &param);
2121         if (rc != ECORE_SUCCESS)
2122                 DP_ERR(p_hwfn, "Failed to send driver state\n");
2123
2124         return rc;
2125 }
2126
2127 enum _ecore_status_t
2128 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2129                          struct ecore_fc_npiv_tbl *p_table)
2130 {
2131         return 0;
2132 }
2133
2134 enum _ecore_status_t
2135 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
2136                         struct ecore_ptt *p_ptt, u16 mtu)
2137 {
2138         return 0;
2139 }
2140
2141 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2142                                        struct ecore_ptt *p_ptt,
2143                                        enum ecore_led_mode mode)
2144 {
2145         u32 resp = 0, param = 0, drv_mb_param;
2146         enum _ecore_status_t rc;
2147
2148         switch (mode) {
2149         case ECORE_LED_MODE_ON:
2150                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2151                 break;
2152         case ECORE_LED_MODE_OFF:
2153                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2154                 break;
2155         case ECORE_LED_MODE_RESTORE:
2156                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2157                 break;
2158         default:
2159                 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2160                 return ECORE_INVAL;
2161         }
2162
2163         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2164                            drv_mb_param, &resp, &param);
2165         if (rc != ECORE_SUCCESS)
2166                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2167
2168         return rc;
2169 }
2170
2171 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2172                                              struct ecore_ptt *p_ptt,
2173                                              u32 mask_parities)
2174 {
2175         enum _ecore_status_t rc;
2176         u32 resp = 0, param = 0;
2177
2178         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2179                            mask_parities, &resp, &param);
2180
2181         if (rc != ECORE_SUCCESS) {
2182                 DP_ERR(p_hwfn,
2183                        "MCP response failure for mask parities, aborting\n");
2184         } else if (resp != FW_MSG_CODE_OK) {
2185                 DP_ERR(p_hwfn,
2186                        "MCP did not ack mask parity request. Old MFW?\n");
2187                 rc = ECORE_INVAL;
2188         }
2189
2190         return rc;
2191 }
2192
/* Read 'len' bytes from NVM address 'addr' into 'p_buf'.
 *
 * The transfer is split into mailbox transactions of at most
 * MCP_DRV_NVM_BUF_LEN bytes each; 'offset' tracks progress within the
 * caller's buffer.  The last mailbox response is cached in
 * p_dev->mcp_nvm_resp for later retrieval via ecore_mcp_nvm_resp().
 *
 * Returns ECORE_BUSY if no PTT window is available, otherwise the status
 * of the last mailbox transaction.
 */
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
                                        u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	bytes_left = len;
	offset = 0;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		/* The mailbox offset carries both the NVM address and the
		 * requested chunk length.
		 */
		params.nvm_common.offset = (addr + offset) |
		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
					    FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		/* The condition fires roughly once per 4KB of progress */
		if (bytes_left % 0x1000 <
		    (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
2241
2242 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2243                                         u32 addr, u8 *p_buf, u32 len)
2244 {
2245         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2246         struct ecore_mcp_nvm_params params;
2247         struct ecore_ptt *p_ptt;
2248         enum _ecore_status_t rc;
2249
2250         p_ptt = ecore_ptt_acquire(p_hwfn);
2251         if (!p_ptt)
2252                 return ECORE_BUSY;
2253
2254         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2255         params.type = ECORE_MCP_NVM_RD;
2256         params.nvm_rd.buf_size = &len;
2257         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2258             DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
2259         params.nvm_common.offset = addr;
2260         params.nvm_rd.buf = (u32 *)p_buf;
2261         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2262         if (rc != ECORE_SUCCESS)
2263                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2264
2265         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2266         ecore_ptt_release(p_hwfn, p_ptt);
2267
2268         return rc;
2269 }
2270
2271 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2272 {
2273         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2274         struct ecore_mcp_nvm_params params;
2275         struct ecore_ptt *p_ptt;
2276
2277         p_ptt = ecore_ptt_acquire(p_hwfn);
2278         if (!p_ptt)
2279                 return ECORE_BUSY;
2280
2281         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2282         OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2283         ecore_ptt_release(p_hwfn, p_ptt);
2284
2285         return ECORE_SUCCESS;
2286 }
2287
2288 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2289 {
2290         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2291         struct ecore_mcp_nvm_params params;
2292         struct ecore_ptt *p_ptt;
2293         enum _ecore_status_t rc;
2294
2295         p_ptt = ecore_ptt_acquire(p_hwfn);
2296         if (!p_ptt)
2297                 return ECORE_BUSY;
2298         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2299         params.type = ECORE_MCP_CMD;
2300         params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
2301         params.nvm_common.offset = addr;
2302         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2303         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2304         ecore_ptt_release(p_hwfn, p_ptt);
2305
2306         return rc;
2307 }
2308
2309 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2310                                                   u32 addr)
2311 {
2312         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2313         struct ecore_mcp_nvm_params params;
2314         struct ecore_ptt *p_ptt;
2315         enum _ecore_status_t rc;
2316
2317         p_ptt = ecore_ptt_acquire(p_hwfn);
2318         if (!p_ptt)
2319                 return ECORE_BUSY;
2320         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2321         params.type = ECORE_MCP_CMD;
2322         params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2323         params.nvm_common.offset = addr;
2324         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2325         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2326         ecore_ptt_release(p_hwfn, p_ptt);
2327
2328         return rc;
2329 }
2330
2331 /* rc receives ECORE_INVAL as default parameter because
2332  * it might not enter the while loop if the len is 0
2333  */
2334 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2335                                          u32 addr, u8 *p_buf, u32 len)
2336 {
2337         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2338         enum _ecore_status_t rc = ECORE_INVAL;
2339         struct ecore_mcp_nvm_params params;
2340         struct ecore_ptt *p_ptt;
2341         u32 buf_idx, buf_size;
2342
2343         p_ptt = ecore_ptt_acquire(p_hwfn);
2344         if (!p_ptt)
2345                 return ECORE_BUSY;
2346
2347         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2348         params.type = ECORE_MCP_NVM_WR;
2349         if (cmd == ECORE_PUT_FILE_DATA)
2350                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2351         else
2352                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2353         buf_idx = 0;
2354         while (buf_idx < len) {
2355                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2356                                       MCP_DRV_NVM_BUF_LEN);
2357                 params.nvm_common.offset = ((buf_size <<
2358                                              DRV_MB_PARAM_NVM_LEN_SHIFT)
2359                                             | addr) + buf_idx;
2360                 params.nvm_wr.buf_size = buf_size;
2361                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2362                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2363                 if (rc != ECORE_SUCCESS ||
2364                     ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
2365                      (params.nvm_common.resp !=
2366                       FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
2367                         DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2368
2369                 /* This can be a lengthy process, and it's possible scheduler
2370                  * isn't preemptible. Sleep a bit to prevent CPU hogging.
2371                  */
2372                 if (buf_idx % 0x1000 >
2373                     (buf_idx + buf_size) % 0x1000)
2374                         OSAL_MSLEEP(1);
2375
2376                 buf_idx += buf_size;
2377         }
2378
2379         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2380         ecore_ptt_release(p_hwfn, p_ptt);
2381
2382         return rc;
2383 }
2384
2385 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2386                                          u32 addr, u8 *p_buf, u32 len)
2387 {
2388         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2389         struct ecore_mcp_nvm_params params;
2390         struct ecore_ptt *p_ptt;
2391         enum _ecore_status_t rc;
2392
2393         p_ptt = ecore_ptt_acquire(p_hwfn);
2394         if (!p_ptt)
2395                 return ECORE_BUSY;
2396
2397         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2398         params.type = ECORE_MCP_NVM_WR;
2399         params.nvm_wr.buf_size = len;
2400         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
2401             DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
2402         params.nvm_common.offset = addr;
2403         params.nvm_wr.buf = (u32 *)p_buf;
2404         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2405         if (rc != ECORE_SUCCESS)
2406                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2407         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2408         ecore_ptt_release(p_hwfn, p_ptt);
2409
2410         return rc;
2411 }
2412
2413 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2414                                                    u32 addr)
2415 {
2416         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2417         struct ecore_mcp_nvm_params params;
2418         struct ecore_ptt *p_ptt;
2419         enum _ecore_status_t rc;
2420
2421         p_ptt = ecore_ptt_acquire(p_hwfn);
2422         if (!p_ptt)
2423                 return ECORE_BUSY;
2424
2425         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2426         params.type = ECORE_MCP_CMD;
2427         params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
2428         params.nvm_common.offset = addr;
2429         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2430         p_dev->mcp_nvm_resp = params.nvm_common.resp;
2431         ecore_ptt_release(p_hwfn, p_ptt);
2432
2433         return rc;
2434 }
2435
2436 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2437                                             struct ecore_ptt *p_ptt,
2438                                             u32 port, u32 addr, u32 offset,
2439                                             u32 len, u8 *p_buf)
2440 {
2441         struct ecore_mcp_nvm_params params;
2442         enum _ecore_status_t rc;
2443         u32 bytes_left, bytes_to_copy, buf_size;
2444
2445         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2446         params.nvm_common.offset =
2447                 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2448                 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2449         addr = offset;
2450         offset = 0;
2451         bytes_left = len;
2452         params.type = ECORE_MCP_NVM_RD;
2453         params.nvm_rd.buf_size = &buf_size;
2454         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
2455         while (bytes_left > 0) {
2456                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2457                                            MAX_I2C_TRANSACTION_SIZE);
2458                 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2459                 params.nvm_common.offset &=
2460                         (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2461                          DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2462                 params.nvm_common.offset |=
2463                         ((addr + offset) <<
2464                          DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2465                 params.nvm_common.offset |=
2466                         (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2467                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2468                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2469                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2470                         return ECORE_NODEV;
2471                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2472                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2473                         return ECORE_UNKNOWN_ERROR;
2474
2475                 offset += *params.nvm_rd.buf_size;
2476                 bytes_left -= *params.nvm_rd.buf_size;
2477         }
2478
2479         return ECORE_SUCCESS;
2480 }
2481
2482 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2483                                              struct ecore_ptt *p_ptt,
2484                                              u32 port, u32 addr, u32 offset,
2485                                              u32 len, u8 *p_buf)
2486 {
2487         struct ecore_mcp_nvm_params params;
2488         enum _ecore_status_t rc;
2489         u32 buf_idx, buf_size;
2490
2491         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2492         params.nvm_common.offset =
2493                 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
2494                 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
2495         params.type = ECORE_MCP_NVM_WR;
2496         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
2497         buf_idx = 0;
2498         while (buf_idx < len) {
2499                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2500                                       MAX_I2C_TRANSACTION_SIZE);
2501                 params.nvm_common.offset &=
2502                         (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2503                          DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2504                 params.nvm_common.offset |=
2505                         ((offset + buf_idx) <<
2506                          DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
2507                 params.nvm_common.offset |=
2508                         (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
2509                 params.nvm_wr.buf_size = buf_size;
2510                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2511                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2512                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
2513                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2514                         return ECORE_NODEV;
2515                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2516                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2517                         return ECORE_UNKNOWN_ERROR;
2518
2519                 buf_idx += buf_size;
2520         }
2521
2522         return ECORE_SUCCESS;
2523 }
2524
2525 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2526                                          struct ecore_ptt *p_ptt,
2527                                          u16 gpio, u32 *gpio_val)
2528 {
2529         enum _ecore_status_t rc = ECORE_SUCCESS;
2530         u32 drv_mb_param = 0, rsp;
2531
2532         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
2533
2534         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
2535                            drv_mb_param, &rsp, gpio_val);
2536
2537         if (rc != ECORE_SUCCESS)
2538                 return rc;
2539
2540         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2541                 return ECORE_UNKNOWN_ERROR;
2542
2543         return ECORE_SUCCESS;
2544 }
2545
2546 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2547                                           struct ecore_ptt *p_ptt,
2548                                           u16 gpio, u16 gpio_val)
2549 {
2550         enum _ecore_status_t rc = ECORE_SUCCESS;
2551         u32 drv_mb_param = 0, param, rsp;
2552
2553         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
2554                 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
2555
2556         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2557                            drv_mb_param, &rsp, &param);
2558
2559         if (rc != ECORE_SUCCESS)
2560                 return rc;
2561
2562         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2563                 return ECORE_UNKNOWN_ERROR;
2564
2565         return ECORE_SUCCESS;
2566 }
2567
2568 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2569                                          struct ecore_ptt *p_ptt,
2570                                          u16 gpio, u32 *gpio_direction,
2571                                          u32 *gpio_ctrl)
2572 {
2573         u32 drv_mb_param = 0, rsp, val = 0;
2574         enum _ecore_status_t rc = ECORE_SUCCESS;
2575
2576         drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
2577
2578         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2579                            drv_mb_param, &rsp, &val);
2580         if (rc != ECORE_SUCCESS)
2581                 return rc;
2582
2583         *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2584                            DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
2585         *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2586                       DRV_MB_PARAM_GPIO_CTRL_SHIFT;
2587
2588         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2589                 return ECORE_UNKNOWN_ERROR;
2590
2591         return ECORE_SUCCESS;
2592 }
2593
2594 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2595                                                   struct ecore_ptt *p_ptt)
2596 {
2597         u32 drv_mb_param = 0, rsp, param;
2598         enum _ecore_status_t rc = ECORE_SUCCESS;
2599
2600         drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2601                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2602
2603         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2604                            drv_mb_param, &rsp, &param);
2605
2606         if (rc != ECORE_SUCCESS)
2607                 return rc;
2608
2609         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2610             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2611                 rc = ECORE_UNKNOWN_ERROR;
2612
2613         return rc;
2614 }
2615
2616 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2617                                                struct ecore_ptt *p_ptt)
2618 {
2619         u32 drv_mb_param, rsp, param;
2620         enum _ecore_status_t rc = ECORE_SUCCESS;
2621
2622         drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
2623                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2624
2625         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2626                            drv_mb_param, &rsp, &param);
2627
2628         if (rc != ECORE_SUCCESS)
2629                 return rc;
2630
2631         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2632             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2633                 rc = ECORE_UNKNOWN_ERROR;
2634
2635         return rc;
2636 }
2637
2638 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
2639         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
2640 {
2641         u32 drv_mb_param = 0, rsp;
2642         enum _ecore_status_t rc = ECORE_SUCCESS;
2643
2644         drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
2645                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2646
2647         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2648                            drv_mb_param, &rsp, num_images);
2649
2650         if (rc != ECORE_SUCCESS)
2651                 return rc;
2652
2653         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
2654                 rc = ECORE_UNKNOWN_ERROR;
2655
2656         return rc;
2657 }
2658
2659 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
2660         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2661         struct bist_nvm_image_att *p_image_att, u32 image_index)
2662 {
2663         struct ecore_mcp_nvm_params params;
2664         enum _ecore_status_t rc;
2665         u32 buf_size;
2666
2667         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2668         params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
2669                                     DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2670         params.nvm_common.offset |= (image_index <<
2671                                     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
2672
2673         params.type = ECORE_MCP_NVM_RD;
2674         params.nvm_rd.buf_size = &buf_size;
2675         params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
2676         params.nvm_rd.buf = (u32 *)p_image_att;
2677
2678         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2679         if (rc != ECORE_SUCCESS)
2680                 return rc;
2681
2682         if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2683             (p_image_att->return_code != 1))
2684                 rc = ECORE_UNKNOWN_ERROR;
2685
2686         return rc;
2687 }
2688
/* Query the MFW for the current temperature sensors state and translate the
 * packed MFW representation into the ecore_temperature_info layout.
 */
enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc *p_mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mfw_temp_info = &union_data.temp_info;

	/* The ecore and MFW sensor array sizes must stay in sync */
	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	/* Never report more sensors than the local array can hold */
	p_temp_info->num_sensors = OSAL_MIN_T(u32,
					      p_mfw_temp_info->num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	/* Each MFW sensor word packs location, thresholds and the current
	 * reading into separate bitfields
	 */
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = p_mfw_temp_info->sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_SHIFT;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_SHIFT;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_SHIFT;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_SHIFT;
	}

	return ECORE_SUCCESS;
}
2730
2731 enum _ecore_status_t ecore_mcp_get_mba_versions(
2732         struct ecore_hwfn *p_hwfn,
2733         struct ecore_ptt *p_ptt,
2734         struct ecore_mba_vers *p_mba_vers)
2735 {
2736         struct ecore_mcp_nvm_params params;
2737         enum _ecore_status_t rc;
2738         u32 buf_size;
2739
2740         OSAL_MEM_ZERO(&params, sizeof(params));
2741         params.type = ECORE_MCP_NVM_RD;
2742         params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
2743         params.nvm_common.offset = 0;
2744         params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
2745         params.nvm_rd.buf_size = &buf_size;
2746         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2747
2748         if (rc != ECORE_SUCCESS)
2749                 return rc;
2750
2751         if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
2752             FW_MSG_CODE_NVM_OK)
2753                 rc = ECORE_UNKNOWN_ERROR;
2754
2755         if (buf_size != MCP_DRV_NVM_BUF_LEN)
2756                 rc = ECORE_UNKNOWN_ERROR;
2757
2758         return rc;
2759 }
2760
/* Retrieve the count of memory ECC events recorded by the MFW.
 * NOTE(review): num_events is a u64, but the mailbox param is a u32, so
 * only 32 bits of *num_events are written (the low word on little-endian
 * hosts); the upper half is left untouched. Callers should pre-zero
 * *num_events - confirm against the MFW interface definition.
 */
enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}
2770
2771 static enum resource_id_enum
2772 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
2773 {
2774         enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
2775
2776         switch (res_id) {
2777         case ECORE_SB:
2778                 mfw_res_id = RESOURCE_NUM_SB_E;
2779                 break;
2780         case ECORE_L2_QUEUE:
2781                 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
2782                 break;
2783         case ECORE_VPORT:
2784                 mfw_res_id = RESOURCE_NUM_VPORT_E;
2785                 break;
2786         case ECORE_RSS_ENG:
2787                 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
2788                 break;
2789         case ECORE_PQ:
2790                 mfw_res_id = RESOURCE_NUM_PQ_E;
2791                 break;
2792         case ECORE_RL:
2793                 mfw_res_id = RESOURCE_NUM_RL_E;
2794                 break;
2795         case ECORE_MAC:
2796         case ECORE_VLAN:
2797                 /* Each VFC resource can accommodate both a MAC and a VLAN */
2798                 mfw_res_id = RESOURCE_VFC_FILTER_E;
2799                 break;
2800         case ECORE_ILT:
2801                 mfw_res_id = RESOURCE_ILT_E;
2802                 break;
2803         case ECORE_LL2_QUEUE:
2804                 mfw_res_id = RESOURCE_LL2_QUEUE_E;
2805                 break;
2806         case ECORE_RDMA_CNQ_RAM:
2807         case ECORE_CMDQS_CQS:
2808                 /* CNQ/CMDQS are the same resource */
2809                 mfw_res_id = RESOURCE_CQS_E;
2810                 break;
2811         case ECORE_RDMA_STATS_QUEUE:
2812                 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
2813                 break;
2814         case ECORE_BDQ:
2815                 mfw_res_id = RESOURCE_BDQ_E;
2816                 break;
2817         default:
2818                 break;
2819         }
2820
2821         return mfw_res_id;
2822 }
2823
/* Version of the driver <-> MFW resource-allocation interface; carried in
 * the mailbox param of the GET/SET resource messages.
 */
#define ECORE_RESC_ALLOC_VERSION_MAJOR	2
#define ECORE_RESC_ALLOC_VERSION_MINOR	0
#define ECORE_RESC_ALLOC_VERSION				\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

/* Input of a resource-allocation mailbox request */
struct ecore_resc_alloc_in_params {
	u32 cmd;			/* DRV_MSG_GET/SET_RESOURCE_*_MSG */
	enum ecore_resources res_id;	/* resource being queried/configured */
	u32 resc_max_val;		/* max value; used by SET only */
};

/* Output unpacked from the MFW's resource_info reply */
struct ecore_resc_alloc_out_params {
	u32 mcp_resp;		/* mailbox response code */
	u32 mcp_param;		/* mailbox param; carries the MFW HSI version */
	u32 resc_num;		/* amount of the resource (resource_info.size) */
	u32 resc_start;		/* first index (resource_info.offset) */
	u32 vf_resc_num;	/* per-VF amount (resource_info.vf_size) */
	u32 vf_resc_start;	/* first VF index (resource_info.vf_offset) */
	u32 flags;		/* resource_info.flags as reported by the MFW */
};
2847
/* Exchange a resource-allocation message with the MFW.
 *
 * Converts the ecore resource id into the MFW's resource_id_enum, sends
 * the request (GET, or SET carrying resc_max_val) with the
 * resource-allocation HSI version in the mailbox param, and unpacks the
 * MFW's resource_info reply into @p_out_params.
 */
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct ecore_resc_alloc_in_params *p_in_params,
			      struct ecore_resc_alloc_out_params *p_out_params)
{
	struct resource_info *p_mfw_resc_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	p_mfw_resc_info = &union_data.resource;
	OSAL_MEM_ZERO(p_mfw_resc_info, sizeof(*p_mfw_resc_info));

	p_mfw_resc_info->res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
	if (p_mfw_resc_info->res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       ecore_hw_get_resc_name(p_in_params->res_id));
		return ECORE_INVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		/* SET requests carry the desired max value in 'size' */
		p_mfw_resc_info->size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	/* The same union buffer carries the request and receives the reply */
	mb_params.p_data_src = &union_data;
	mb_params.p_data_dst = &union_data;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd, p_in_params->res_id,
		   ecore_hw_get_resc_name(p_in_params->res_id),
		   ECORE_MFW_GET_FIELD(mb_params.param,
			   DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   ECORE_MFW_GET_FIELD(mb_params.param,
			   DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = p_mfw_resc_info->size;
	p_out_params->resc_start = p_mfw_resc_info->offset;
	p_out_params->vf_resc_num = p_mfw_resc_info->vf_size;
	p_out_params->vf_resc_start = p_mfw_resc_info->vf_offset;
	p_out_params->flags = p_mfw_resc_info->flags;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
			   FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
			   FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num, p_out_params->resc_start,
		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
		   p_out_params->flags);

	return ECORE_SUCCESS;
}
2923
2924 enum _ecore_status_t
2925 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2926                            enum ecore_resources res_id, u32 resc_max_val,
2927                            u32 *p_mcp_resp)
2928 {
2929         struct ecore_resc_alloc_out_params out_params;
2930         struct ecore_resc_alloc_in_params in_params;
2931         enum _ecore_status_t rc;
2932
2933         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
2934         in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
2935         in_params.res_id = res_id;
2936         in_params.resc_max_val = resc_max_val;
2937         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
2938         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
2939                                            &out_params);
2940         if (rc != ECORE_SUCCESS)
2941                 return rc;
2942
2943         *p_mcp_resp = out_params.mcp_resp;
2944
2945         return ECORE_SUCCESS;
2946 }
2947
2948 enum _ecore_status_t
2949 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2950                         enum ecore_resources res_id, u32 *p_mcp_resp,
2951                         u32 *p_resc_num, u32 *p_resc_start)
2952 {
2953         struct ecore_resc_alloc_out_params out_params;
2954         struct ecore_resc_alloc_in_params in_params;
2955         enum _ecore_status_t rc;
2956
2957         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
2958         in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
2959         in_params.res_id = res_id;
2960         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
2961         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
2962                                            &out_params);
2963         if (rc != ECORE_SUCCESS)
2964                 return rc;
2965
2966         *p_mcp_resp = out_params.mcp_resp;
2967
2968         if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
2969                 *p_resc_num = out_params.resc_num;
2970                 *p_resc_start = out_params.resc_start;
2971         }
2972
2973         return ECORE_SUCCESS;
2974 }
2975
2976 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
2977                                                struct ecore_ptt *p_ptt)
2978 {
2979         u32 mcp_resp, mcp_param;
2980
2981         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
2982                              &mcp_resp, &mcp_param);
2983 }
2984
/* Issue a RESOURCE_CMD mailbox command and screen the generic failure
 * replies: a FW_MSG_CODE_UNSUPPORTED response means the MFW predates the
 * resource command, and a RESOURCE_OPCODE_UNKNOWN_CMD param means the
 * opcode encoded in @param was not recognized by the MFW.
 */
static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 param, u32 *p_mcp_resp,
						   u32 *p_mcp_param)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			   p_mcp_resp, p_mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return ECORE_NOTIMPL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn, false,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return ECORE_INVAL;
	}

	return rc;
}
3014
3015 enum _ecore_status_t
3016 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3017                       struct ecore_resc_lock_params *p_params)
3018 {
3019         u32 param = 0, mcp_resp, mcp_param;
3020         u8 opcode;
3021         enum _ecore_status_t rc;
3022
3023         switch (p_params->timeout) {
3024         case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3025                 opcode = RESOURCE_OPCODE_REQ;
3026                 p_params->timeout = 0;
3027                 break;
3028         case ECORE_MCP_RESC_LOCK_TO_NONE:
3029                 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3030                 p_params->timeout = 0;
3031                 break;
3032         default:
3033                 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3034                 break;
3035         }
3036
3037         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3038         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3039         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3040
3041         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3042                    "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3043                    param, p_params->timeout, opcode, p_params->resource);
3044
3045         /* Attempt to acquire the resource */
3046         rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3047                                     &mcp_param);
3048         if (rc != ECORE_SUCCESS)
3049                 return rc;
3050
3051         /* Analyze the response */
3052         p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
3053                                              RESOURCE_CMD_RSP_OWNER);
3054         opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3055
3056         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3057                    "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3058                    mcp_param, opcode, p_params->owner);
3059
3060         switch (opcode) {
3061         case RESOURCE_OPCODE_GNT:
3062                 p_params->b_granted = true;
3063                 break;
3064         case RESOURCE_OPCODE_BUSY:
3065                 p_params->b_granted = false;
3066                 break;
3067         default:
3068                 DP_NOTICE(p_hwfn, false,
3069                           "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3070                           mcp_param, opcode);
3071                 return ECORE_INVAL;
3072         }
3073
3074         return ECORE_SUCCESS;
3075 }
3076
3077 enum _ecore_status_t
3078 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3079                     struct ecore_resc_lock_params *p_params)
3080 {
3081         u32 retry_cnt = 0;
3082         enum _ecore_status_t rc;
3083
3084         do {
3085                 /* No need for an interval before the first iteration */
3086                 if (retry_cnt) {
3087                         if (p_params->sleep_b4_retry) {
3088                                 u16 retry_interval_in_ms =
3089                                         DIV_ROUND_UP(p_params->retry_interval,
3090                                                      1000);
3091
3092                                 OSAL_MSLEEP(retry_interval_in_ms);
3093                         } else {
3094                                 OSAL_UDELAY(p_params->retry_interval);
3095                         }
3096                 }
3097
3098                 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3099                 if (rc != ECORE_SUCCESS)
3100                         return rc;
3101
3102                 if (p_params->b_granted)
3103                         break;
3104         } while (retry_cnt++ < p_params->retry_num);
3105
3106         return ECORE_SUCCESS;
3107 }
3108
3109 enum _ecore_status_t
3110 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3111                       struct ecore_resc_unlock_params *p_params)
3112 {
3113         u32 param = 0, mcp_resp, mcp_param;
3114         u8 opcode;
3115         enum _ecore_status_t rc;
3116
3117         opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3118                                    : RESOURCE_OPCODE_RELEASE;
3119         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3120         ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3121
3122         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3123                    "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3124                    param, opcode, p_params->resource);
3125
3126         /* Attempt to release the resource */
3127         rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3128                                     &mcp_param);
3129         if (rc != ECORE_SUCCESS)
3130                 return rc;
3131
3132         /* Analyze the response */
3133         opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3134
3135         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3136                    "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3137                    mcp_param, opcode);
3138
3139         switch (opcode) {
3140         case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3141                 DP_INFO(p_hwfn,
3142                         "Resource unlock request for an already released resource [%d]\n",
3143                         p_params->resource);
3144                 /* Fallthrough */
3145         case RESOURCE_OPCODE_RELEASED:
3146                 p_params->b_released = true;
3147                 break;
3148         case RESOURCE_OPCODE_WRONG_OWNER:
3149                 p_params->b_released = false;
3150                 break;
3151         default:
3152                 DP_NOTICE(p_hwfn, false,
3153                           "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3154                           mcp_param, opcode);
3155                 return ECORE_INVAL;
3156         }
3157
3158         return ECORE_SUCCESS;
3159 }