net/qede/base: block mbox command to unresponsive MFW
[dpdk.git] / drivers / net / qede / base / ecore_mcp.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_vf.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23 #include "ecore_sp_commands.h"
24
#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)   /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000)     /* Account for 500 msec */

/* Read/write a value at an offset within a per-PF shared-memory section;
 * _ptr names the mcp_info member that holds the section's base address.
 */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
        ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
                 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
        ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

/* Access a named field of the driver mailbox in shared memory.
 * Fix: expand the _p_hwfn macro parameter instead of referencing a
 * variable literally named 'p_hwfn' at the expansion site, which only
 * worked by accident of caller naming.
 */
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
        DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
                     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
        DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
                     OFFSETOF(struct public_drv_mb, _field))

/* FW version packed into the PDA compatibility field of the LOAD_REQ param */
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
        DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17
50
#ifndef ASIC_ONLY
/* Emulation-only counters of how many functions (and per-port functions)
 * have loaded; used to fake MFW load-phase responses when no MFW exists.
 */
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif
55
56 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
57 {
58         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
59                 return false;
60         return true;
61 }
62
63 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
64 {
65         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
66                                         PUBLIC_PORT);
67         u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
68
69         p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
70                                                    MFW_PORT(p_hwfn));
71         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
72                    "port_addr = 0x%x, port_id 0x%02x\n",
73                    p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
74 }
75
76 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
77 {
78         u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
79         OSAL_BE32 tmp;
80         u32 i;
81
82 #ifndef ASIC_ONLY
83         if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
84                 return;
85 #endif
86
87         if (!p_hwfn->mcp_info->public_base)
88                 return;
89
90         for (i = 0; i < length; i++) {
91                 tmp = ecore_rd(p_hwfn, p_ptt,
92                                p_hwfn->mcp_info->mfw_mb_addr +
93                                (i << 2) + sizeof(u32));
94
95                 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
96                     OSAL_BE32_TO_CPU(tmp);
97         }
98 }
99
/* An element in the list of in-flight mailbox commands */
struct ecore_mcp_cmd_elem {
        osal_list_entry_t list;
        /* Parameters of the tracked command; response fields are filled in
         * when the matching MFW response arrives.
         */
        struct ecore_mcp_mb_params *p_mb_params;
        /* Sequence number the MFW response is expected to carry */
        u16 expected_seq_num;
        bool b_is_completed;
};
106
107 /* Must be called while cmd_lock is acquired */
108 static struct ecore_mcp_cmd_elem *
109 ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
110                        struct ecore_mcp_mb_params *p_mb_params,
111                        u16 expected_seq_num)
112 {
113         struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
114
115         p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
116                                  sizeof(*p_cmd_elem));
117         if (!p_cmd_elem) {
118                 DP_NOTICE(p_hwfn, false,
119                           "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
120                 goto out;
121         }
122
123         p_cmd_elem->p_mb_params = p_mb_params;
124         p_cmd_elem->expected_seq_num = expected_seq_num;
125         OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
126 out:
127         return p_cmd_elem;
128 }
129
/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
                                   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
        /* Unlink the element from the pending-command list and release it */
        OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
        OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}
137
138 /* Must be called while cmd_lock is acquired */
139 static struct ecore_mcp_cmd_elem *
140 ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
141 {
142         struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
143
144         OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
145                                  struct ecore_mcp_cmd_elem) {
146                 if (p_cmd_elem->expected_seq_num == seq_num)
147                         return p_cmd_elem;
148         }
149
150         return OSAL_NULL;
151 }
152
/* Release all MCP resources of the hwfn: any still-pending command list
 * elements, the MFW mailbox copies, the locks (when lock allocation is
 * compiled in) and the mcp_info structure itself.
 */
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
        if (p_hwfn->mcp_info) {
                struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

                /* Drop any command elements that were never completed */
                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
                OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
                                              &p_hwfn->mcp_info->cmd_list, list,
                                              struct ecore_mcp_cmd_elem) {
                        ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                }
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
#ifdef CONFIG_ECORE_LOCK_ALLOC
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
        }

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

        return ECORE_SUCCESS;
}
178
/* Learn the shared-memory locations of the driver and MFW mailboxes for
 * this PF, the current mailbox/pulse sequence numbers, and the MCP history
 * counter. Returns ECORE_INVAL when no MFW appears to be present.
 */
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
                p_info->public_base = 0;
                return ECORE_INVAL;
        }
#endif

        /* A zero shared-memory address means the MCP never initialized it */
        p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return ECORE_INVAL;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
                   " mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address */
        mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        /* The first dword at mfw_mb_addr holds the mailbox length */
        p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
                                               p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
            DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
            DRV_PULSE_SEQ_MASK;

        /* Snapshot the MCP history counter to later detect MCP resets */
        p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        return ECORE_SUCCESS;
}
232
233 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
234                                         struct ecore_ptt *p_ptt)
235 {
236         struct ecore_mcp_info *p_info;
237         u32 size;
238
239         /* Allocate mcp_info structure */
240         p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
241                                        sizeof(*p_hwfn->mcp_info));
242         if (!p_hwfn->mcp_info)
243                 goto err;
244         p_info = p_hwfn->mcp_info;
245
246         if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
247                 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
248                 /* Do not free mcp_info here, since public_base indicate that
249                  * the MCP is not initialized
250                  */
251                 return ECORE_SUCCESS;
252         }
253
254         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
255         p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
256         p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
257         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
258                 goto err;
259
260         /* Initialize the MFW spinlocks */
261 #ifdef CONFIG_ECORE_LOCK_ALLOC
262         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
263         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
264 #endif
265         OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
266         OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
267
268         OSAL_LIST_INIT(&p_info->cmd_list);
269
270         return ECORE_SUCCESS;
271
272 err:
273         DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
274         ecore_mcp_free(p_hwfn);
275         return ECORE_NOMEM;
276 }
277
278 static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
279                                      struct ecore_ptt *p_ptt)
280 {
281         u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
282
283         /* Use MCP history register to check if MCP reset occurred between init
284          * time and now.
285          */
286         if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
287                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
288                            "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
289                            p_hwfn->mcp_info->mcp_hist, generic_por_0);
290
291                 ecore_load_mcp_offsets(p_hwfn, p_ptt);
292                 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
293         }
294 }
295
/* Request an MCP reset via the mailbox and poll the history counter until
 * it changes (indicating the reset took place) or the retries expire.
 */
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
#endif

        /* Don't touch the mailbox if the MFW was already deemed unresponsive */
        if (p_hwfn->mcp_info->b_block_cmd) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
                return ECORE_ABORTED;
        }

        /* Ensure that only a single thread is accessing the mailbox */
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

        org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        /* Set drv command along with the updated sequence */
        ecore_mcp_reread_offsets(p_hwfn, p_ptt);
        seq = ++p_hwfn->mcp_info->drv_mb_seq;
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                /* Give the FW up to 500 msec (50*1000*10usec) */
        } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
                                                MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < ECORE_MCP_RESET_RETRIES));

        /* A changed history counter means the MCP went through a reset */
        if (org_mcp_reset_seq !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = ECORE_AGAIN;
        }

        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

        return rc;
}
344
345 /* Must be called while cmd_lock is acquired */
346 static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
347 {
348         struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
349
350         /* There is at most one pending command at a certain time, and if it
351          * exists - it is placed at the HEAD of the list.
352          */
353         if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
354                 p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
355                                                    struct ecore_mcp_cmd_elem,
356                                                    list);
357                 return !p_cmd_elem->b_is_completed;
358         }
359
360         return false;
361 }
362
/* Must be called while cmd_lock is acquired.
 * Check the firmware mailbox header for a new response; when one matching
 * the current driver sequence number is found, copy the response code,
 * param and union data into the pending command's mb_params and mark the
 * command completed. Returns ECORE_AGAIN when no new response is present.
 */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_mb_params *p_mb_params;
        struct ecore_mcp_cmd_elem *p_cmd_elem;
        u32 mcp_resp;
        u16 seq_num;

        /* The response header carries the sequence number of the command
         * being answered in its low bits.
         */
        mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
        seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

        /* Return if no new non-handled response has been received */
        if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
                return ECORE_AGAIN;

        p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
        if (!p_cmd_elem) {
                DP_ERR(p_hwfn,
                       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
                       seq_num);
                return ECORE_UNKNOWN_ERROR;
        }

        p_mb_params = p_cmd_elem->p_mb_params;

        /* Get the MFW response along with the sequence number */
        p_mb_params->mcp_resp = mcp_resp;

        /* Get the MFW param */
        p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

        /* Get the union data */
        if (p_mb_params->p_data_dst != OSAL_NULL &&
            p_mb_params->data_dst_size) {
                u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                                      OFFSETOF(struct public_drv_mb,
                                               union_data);
                ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
                                  union_data_addr, p_mb_params->data_dst_size);
        }

        p_cmd_elem->b_is_completed = true;

        return ECORE_SUCCESS;
}
409
/* Must be called while cmd_lock is acquired.
 * Write a mailbox command to shared memory: the union data first, then the
 * param, then the header carrying the command and sequence number.
 */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt,
                                      struct ecore_mcp_mb_params *p_mb_params,
                                      u16 seq_num)
{
        union drv_union_data union_data;
        u32 union_data_addr;

        /* Set the union data. The full union is always written (zero-padded
         * beyond data_src_size), before the header is set.
         * NOTE(review): presumably the header write is what signals the MFW,
         * making this ordering significant - confirm against MFW docs.
         */
        union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                          OFFSETOF(struct public_drv_mb, union_data);
        OSAL_MEM_ZERO(&union_data, sizeof(union_data));
        if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
                OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
                            p_mb_params->data_src_size);
        ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
                        sizeof(union_data));

        /* Set the drv param */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

        /* Set the drv command along with the sequence number */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "MFW mailbox: command 0x%08x param 0x%08x\n",
                   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
439
/* Set/clear the flag that gates all further mailbox traffic to the MFW */
static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
                                       bool block_cmd)
{
        p_hwfn->mcp_info->b_block_cmd = block_cmd;

        DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
                block_cmd ? "Block" : "Unblock");
}
448
/* Send a mailbox command and busy-wait for the MFW response.
 * Waits (up to max_retries * delay) for any previous command to complete,
 * sends the new command under cmd_lock, then polls for its response. On
 * response timeout the command element is dropped, further mailbox traffic
 * is blocked and ECORE_HW_ERR_MFW_RESP_FAIL is raised.
 */
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         struct ecore_mcp_mb_params *p_mb_params,
                         u32 max_retries, u32 delay)
{
        struct ecore_mcp_cmd_elem *p_cmd_elem;
        u32 cnt = 0;
        u16 seq_num;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* Wait until the mailbox is non-occupied */
        do {
                /* Exit the loop if there is no pending command, or if the
                 * pending command is completed during this iteration.
                 * The spinlock stays locked until the command is sent.
                 */

                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

                if (!ecore_mcp_has_pending_cmd(p_hwfn))
                        break;

                rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
                if (rc == ECORE_SUCCESS)
                        break;
                else if (rc != ECORE_AGAIN)
                        goto err;

                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
                OSAL_UDELAY(delay);
        } while (++cnt < max_retries);

        if (cnt >= max_retries) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
                return ECORE_AGAIN;
        }

        /* Send the mailbox command */
        ecore_mcp_reread_offsets(p_hwfn, p_ptt);
        seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
        p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
        if (!p_cmd_elem) {
                rc = ECORE_NOMEM;
                goto err;
        }

        __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

        /* Wait for the MFW response */
        do {
                /* Exit the loop if the command is already completed, or if the
                 * command is completed during this iteration.
                 * The spinlock stays locked until the list element is removed.
                 */

                OSAL_UDELAY(delay);
                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

                if (p_cmd_elem->b_is_completed)
                        break;

                rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
                if (rc == ECORE_SUCCESS)
                        break;
                else if (rc != ECORE_AGAIN)
                        goto err;

                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
        } while (++cnt < max_retries);

        if (cnt >= max_retries) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);

                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
                ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

                /* An unresponsive MFW blocks all subsequent commands and
                 * notifies the error to upper layers.
                 */
                ecore_mcp_cmd_set_blocking(p_hwfn, true);
                ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
                return ECORE_AGAIN;
        }

        ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
                   p_mb_params->mcp_resp, p_mb_params->mcp_param,
                   (cnt * delay) / 1000, (cnt * delay) % 1000);

        /* Clear the sequence number from the MFW response */
        p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

        return ECORE_SUCCESS;

err:
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
        return rc;
}
553
/* Validate and dispatch a mailbox command: verify the MCP is initialized,
 * that the data sizes fit within the union, and that commands are not
 * blocked; then send with chip-appropriate retry count and delay.
 */
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct ecore_mcp_mb_params *p_mb_params)
{
        osal_size_t union_data_size = sizeof(union drv_union_data);
        u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
        u32 delay = CHIP_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
        /* There is a built-in delay of 100usec in each MFW response read */
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                max_retries /= 10;
#endif

        /* MCP not initialized */
        if (!ecore_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
                return ECORE_BUSY;
        }

        if (p_mb_params->data_src_size > union_data_size ||
            p_mb_params->data_dst_size > union_data_size) {
                DP_ERR(p_hwfn,
                       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
                       p_mb_params->data_src_size, p_mb_params->data_dst_size,
                       union_data_size);
                return ECORE_INVAL;
        }

        /* Once the MFW is deemed unresponsive, every subsequent command is
         * rejected until commands are unblocked.
         */
        if (p_hwfn->mcp_info->b_block_cmd) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
                return ECORE_ABORTED;
        }

        return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
                                        delay);
}
596
597 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
598                                    struct ecore_ptt *p_ptt, u32 cmd, u32 param,
599                                    u32 *o_mcp_resp, u32 *o_mcp_param)
600 {
601         struct ecore_mcp_mb_params mb_params;
602         enum _ecore_status_t rc;
603
604 #ifndef ASIC_ONLY
605         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
606                 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
607                         loaded--;
608                         loaded_port[p_hwfn->port_id]--;
609                         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
610                                    loaded);
611                 }
612                 return ECORE_SUCCESS;
613         }
614 #endif
615
616         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
617         mb_params.cmd = cmd;
618         mb_params.param = param;
619         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
620         if (rc != ECORE_SUCCESS)
621                 return rc;
622
623         *o_mcp_resp = mb_params.mcp_resp;
624         *o_mcp_param = mb_params.mcp_param;
625
626         return ECORE_SUCCESS;
627 }
628
629 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
630                                           struct ecore_ptt *p_ptt,
631                                           u32 cmd,
632                                           u32 param,
633                                           u32 *o_mcp_resp,
634                                           u32 *o_mcp_param,
635                                           u32 i_txn_size, u32 *i_buf)
636 {
637         struct ecore_mcp_mb_params mb_params;
638         enum _ecore_status_t rc;
639
640         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
641         mb_params.cmd = cmd;
642         mb_params.param = param;
643         mb_params.p_data_src = i_buf;
644         mb_params.data_src_size = (u8)i_txn_size;
645         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
646         if (rc != ECORE_SUCCESS)
647                 return rc;
648
649         *o_mcp_resp = mb_params.mcp_resp;
650         *o_mcp_param = mb_params.mcp_param;
651
652         return ECORE_SUCCESS;
653 }
654
655 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
656                                           struct ecore_ptt *p_ptt,
657                                           u32 cmd,
658                                           u32 param,
659                                           u32 *o_mcp_resp,
660                                           u32 *o_mcp_param,
661                                           u32 *o_txn_size, u32 *o_buf)
662 {
663         struct ecore_mcp_mb_params mb_params;
664         u8 raw_data[MCP_DRV_NVM_BUF_LEN];
665         enum _ecore_status_t rc;
666
667         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
668         mb_params.cmd = cmd;
669         mb_params.param = param;
670         mb_params.p_data_dst = raw_data;
671
672         /* Use the maximal value since the actual one is part of the response */
673         mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
674
675         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
676         if (rc != ECORE_SUCCESS)
677                 return rc;
678
679         *o_mcp_resp = mb_params.mcp_resp;
680         *o_mcp_param = mb_params.mcp_param;
681
682         *o_txn_size = *o_mcp_param;
683         /* @DPDK */
684         OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
685
686         return ECORE_SUCCESS;
687 }
688
#ifndef ASIC_ONLY
/* Emulate the MFW's LOAD_REQ response: derive the load phase from the
 * global engine/port load counters and bump them.
 */
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
                                    u32 *p_load_code)
{
        static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

        /* First load on the engine/port gets the corresponding phase */
        if (!loaded)
                load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
        else if (!loaded_port[p_hwfn->port_id])
                load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
        else
                load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

        /* On CMT, always tell that it's engine */
        if (p_hwfn->p_dev->num_hwfns > 1)
                load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

        *p_load_code = load_phase;
        loaded++;
        loaded_port[p_hwfn->port_id]++;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
                   *p_load_code, loaded, p_hwfn->port_id,
                   loaded_port[p_hwfn->port_id]);
}
#endif
716
717 static bool
718 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
719                          enum ecore_override_force_load override_force_load)
720 {
721         bool can_force_load = false;
722
723         switch (override_force_load) {
724         case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
725                 can_force_load = true;
726                 break;
727         case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
728                 can_force_load = false;
729                 break;
730         default:
731                 can_force_load = (drv_role == DRV_ROLE_OS &&
732                                   exist_drv_role == DRV_ROLE_PREBOOT) ||
733                                  (drv_role == DRV_ROLE_KDUMP &&
734                                   exist_drv_role == DRV_ROLE_OS);
735                 break;
736         }
737
738         return can_force_load;
739 }
740
741 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
742                                                       struct ecore_ptt *p_ptt)
743 {
744         u32 resp = 0, param = 0;
745         enum _ecore_status_t rc;
746
747         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
748                            &resp, &param);
749         if (rc != ECORE_SUCCESS)
750                 DP_NOTICE(p_hwfn, false,
751                           "Failed to send cancel load request, rc = %d\n", rc);
752
753         return rc;
754 }
755
/* Bit positions of the protocols compiled into this driver, as reported by
 * ecore_get_config_bitmap().
 */
#define CONFIG_ECORE_L2_BITMAP_IDX      (0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX   (0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX    (0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX   (0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX    (0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX   (0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX     (0x1 << 6)
763
/* Build a bitmap of the protocol features this driver was compiled with,
 * one CONFIG_ECORE_*_BITMAP_IDX bit per enabled CONFIG_ECORE_* option.
 */
static u32 ecore_get_config_bitmap(void)
{
        u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
        config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
        config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
        config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
        config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
        config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
        config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
        config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

        return config_bitmap;
}
792
/* Input parameters for issuing a LOAD_REQ mailbox command */
struct ecore_load_req_in_params {
        u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT  0
#define ECORE_LOAD_REQ_HSI_VER_1        1
        u32 drv_ver_0;
        u32 drv_ver_1;
        u32 fw_ver;
        u8 drv_role;
        u8 timeout_val;
        u8 force_cmd;
        bool avoid_eng_reset;
};

/* Results of a LOAD_REQ command; the exist_* fields describe the already
 * loaded driver when drv_exists is set.
 */
struct ecore_load_req_out_params {
        u32 load_code;
        u32 exist_drv_ver_0;
        u32 exist_drv_ver_1;
        u32 exist_fw_ver;
        u8 exist_drv_role;
        u8 mfw_hsi_ver;
        bool drv_exists;
};
815
/* Send a single LOAD_REQ mailbox command to the MFW and parse the response.
 *
 * @param p_hwfn       - HW function context
 * @param p_ptt        - PTT window used for the mailbox access
 * @param p_in_params  - request contents (versions, role, timeout, force)
 * @param p_out_params - parsed response; see struct ecore_load_req_out_params
 *                       for when the exist_* fields are valid
 *
 * @return ECORE_SUCCESS when the mailbox exchange itself completed; the load
 *         outcome is conveyed through p_out_params->load_code.
 */
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	/* Build the request union data */
	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	/* HSI_VER_DEFAULT means "the current HSI the driver was built with" */
	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	/* The union data is only meaningful for the non-legacy format */
	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	/* Parse the existing-driver details unless the legacy format was used
	 * or the MFW itself fell back to it (LOAD_REFUSED_HSI_1)
	 */
	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}
903
904 static void ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
905                                    enum ecore_drv_role drv_role,
906                                    u8 *p_mfw_drv_role)
907 {
908         switch (drv_role) {
909         case ECORE_DRV_ROLE_OS:
910                 *p_mfw_drv_role = DRV_ROLE_OS;
911                 break;
912         case ECORE_DRV_ROLE_KDUMP:
913                 *p_mfw_drv_role = DRV_ROLE_KDUMP;
914                 break;
915         }
916 }
917
/* Degree of force to request from the MFW when sending a load request;
 * translated to the MFW LOAD_REQ_FORCE_* values by ecore_get_mfw_force_cmd()
 */
enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};
923
924 static void ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
925                                     enum ecore_load_req_force force_cmd,
926                                     u8 *p_mfw_force_cmd)
927 {
928         switch (force_cmd) {
929         case ECORE_LOAD_REQ_FORCE_NONE:
930                 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
931                 break;
932         case ECORE_LOAD_REQ_FORCE_PF:
933                 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
934                 break;
935         case ECORE_LOAD_REQ_FORCE_ALL:
936                 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
937                 break;
938         }
939 }
940
941 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
942                                         struct ecore_ptt *p_ptt,
943                                         struct ecore_load_req_params *p_params)
944 {
945         struct ecore_load_req_out_params out_params;
946         struct ecore_load_req_in_params in_params;
947         u8 mfw_drv_role = 0, mfw_force_cmd;
948         enum _ecore_status_t rc;
949
950 #ifndef ASIC_ONLY
951         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
952                 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
953                 return ECORE_SUCCESS;
954         }
955 #endif
956
957         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
958         in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
959         in_params.drv_ver_0 = ECORE_VERSION;
960         in_params.drv_ver_1 = ecore_get_config_bitmap();
961         in_params.fw_ver = STORM_FW_VERSION;
962         ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
963         in_params.drv_role = mfw_drv_role;
964         in_params.timeout_val = p_params->timeout_val;
965         ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
966                                 &mfw_force_cmd);
967         in_params.force_cmd = mfw_force_cmd;
968         in_params.avoid_eng_reset = p_params->avoid_eng_reset;
969
970         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
971         rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
972         if (rc != ECORE_SUCCESS)
973                 return rc;
974
975         /* First handle cases where another load request should/might be sent:
976          * - MFW expects the old interface [HSI version = 1]
977          * - MFW responds that a force load request is required
978          */
979         if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
980                 DP_INFO(p_hwfn,
981                         "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
982
983                 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
984                 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
985                 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
986                                           &out_params);
987                 if (rc != ECORE_SUCCESS)
988                         return rc;
989         } else if (out_params.load_code ==
990                    FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
991                 if (ecore_mcp_can_force_load(in_params.drv_role,
992                                              out_params.exist_drv_role,
993                                              p_params->override_force_load)) {
994                         DP_INFO(p_hwfn,
995                                 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
996                                 in_params.drv_role, in_params.fw_ver,
997                                 in_params.drv_ver_0, in_params.drv_ver_1,
998                                 out_params.exist_drv_role,
999                                 out_params.exist_fw_ver,
1000                                 out_params.exist_drv_ver_0,
1001                                 out_params.exist_drv_ver_1);
1002
1003                         ecore_get_mfw_force_cmd(p_hwfn,
1004                                                 ECORE_LOAD_REQ_FORCE_ALL,
1005                                                 &mfw_force_cmd);
1006
1007                         in_params.force_cmd = mfw_force_cmd;
1008                         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1009                         rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1010                                                   &out_params);
1011                         if (rc != ECORE_SUCCESS)
1012                                 return rc;
1013                 } else {
1014                         DP_NOTICE(p_hwfn, false,
1015                                   "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1016                                   in_params.drv_role, in_params.fw_ver,
1017                                   in_params.drv_ver_0, in_params.drv_ver_1,
1018                                   out_params.exist_drv_role,
1019                                   out_params.exist_fw_ver,
1020                                   out_params.exist_drv_ver_0,
1021                                   out_params.exist_drv_ver_1);
1022
1023                         ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
1024                         return ECORE_BUSY;
1025                 }
1026         }
1027
1028         /* Now handle the other types of responses.
1029          * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1030          * expected here after the additional revised load requests were sent.
1031          */
1032         switch (out_params.load_code) {
1033         case FW_MSG_CODE_DRV_LOAD_ENGINE:
1034         case FW_MSG_CODE_DRV_LOAD_PORT:
1035         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1036                 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
1037                     out_params.drv_exists) {
1038                         /* The role and fw/driver version match, but the PF is
1039                          * already loaded and has not been unloaded gracefully.
1040                          * This is unexpected since a quasi-FLR request was
1041                          * previously sent as part of ecore_hw_prepare().
1042                          */
1043                         DP_NOTICE(p_hwfn, false,
1044                                   "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
1045                         return ECORE_INVAL;
1046                 }
1047                 break;
1048         default:
1049                 DP_NOTICE(p_hwfn, false,
1050                           "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1051                           out_params.load_code);
1052                 return ECORE_BUSY;
1053         }
1054
1055         p_params->load_code = out_params.load_code;
1056
1057         return ECORE_SUCCESS;
1058 }
1059
1060 enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
1061                                          struct ecore_ptt *p_ptt)
1062 {
1063         u32 resp = 0, param = 0;
1064         enum _ecore_status_t rc;
1065
1066         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1067                            &param);
1068         if (rc != ECORE_SUCCESS) {
1069                 DP_NOTICE(p_hwfn, false,
1070                           "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1071                 return rc;
1072         }
1073
1074 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR     (1 << 0)
1075
1076         /* Check if there is a DID mismatch between nvm-cfg/efuse */
1077         if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1078                 DP_NOTICE(p_hwfn, false,
1079                           "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1080
1081         return ECORE_SUCCESS;
1082 }
1083
1084 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
1085                                           struct ecore_ptt *p_ptt)
1086 {
1087         u32 wol_param, mcp_resp, mcp_param;
1088
1089         /* @DPDK */
1090         wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1091
1092         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1093                              &mcp_resp, &mcp_param);
1094 }
1095
1096 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1097                                            struct ecore_ptt *p_ptt)
1098 {
1099         struct ecore_mcp_mb_params mb_params;
1100         struct mcp_mac wol_mac;
1101
1102         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1103         mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1104
1105         return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1106 }
1107
/* Handle an MFW VF-FLR notification: read the per-path disabled-VF bitmap
 * from the shmem and, if any VF was newly marked as FLR-ed, schedule the
 * OS-layer FLR handling.
 */
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	/* Bitmap of disabled VFs, 32 VFs per word */
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	/* Read mcp_vf_disabled word by word from the path section */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}
1138
/* Acknowledge to the MFW that the driver finished handling FLR for the VFs
 * set in @vfs_to_ack (a bitmap, 32 VFs per word).
 *
 * @return ECORE_SUCCESS on success; any mailbox failure is reported as
 *         ECORE_TIMEOUT.
 */
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	/* The ack bitmap is passed as the union data of the mailbox command */
	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
				     &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}
1178
1179 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1180                                                 struct ecore_ptt *p_ptt)
1181 {
1182         u32 transceiver_state;
1183
1184         transceiver_state = ecore_rd(p_hwfn, p_ptt,
1185                                      p_hwfn->mcp_info->port_addr +
1186                                      OFFSETOF(struct public_port,
1187                                               transceiver_data));
1188
1189         DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1190                    "Received transceiver state update [0x%08x] from mfw"
1191                    " [Addr 0x%x]\n",
1192                    transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1193                                             OFFSETOF(struct public_port,
1194                                                      transceiver_data)));
1195
1196         transceiver_state = GET_MFW_FIELD(transceiver_state,
1197                                           ETH_TRANSCEIVER_STATE);
1198
1199         if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1200                 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1201         else
1202                 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1203 }
1204
1205 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1206                                       struct ecore_ptt *p_ptt,
1207                                       struct ecore_mcp_link_state *p_link)
1208 {
1209         u32 eee_status, val;
1210
1211         p_link->eee_adv_caps = 0;
1212         p_link->eee_lp_adv_caps = 0;
1213         eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1214                                      OFFSETOF(struct public_port, eee_status));
1215         p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1216         val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1217         if (val & EEE_1G_ADV)
1218                 p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1219         if (val & EEE_10G_ADV)
1220                 p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1221         val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1222         if (val & EEE_1G_ADV)
1223                 p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1224         if (val & EEE_10G_ADV)
1225                 p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1226 }
1227
/* Process a link-change indication from the MFW (or reset the cached link
 * state when @b_reset is set). Reads link_status from the port shmem,
 * derives speed/duplex/partner capabilities, re-applies the min/max
 * bandwidth configuration, and notifies the OS layer.
 * Serialized against other callers via mcp_info->link_lock.
 */
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					  OFFSETOF(struct public_port,
						   link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	/* Only report link-up if the driver itself requested the link */
	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store total line speed as p_link->speed is
	 * again changes according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Mintz bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	/* Autoneg / PFC status bits */
	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					 LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	/* Collect the link-partner advertised speeds into a bitmap */
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	/* EEE state is only read if the MFW advertised the capability */
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn, p_ptt);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
1372
/* Request the MFW to configure the link (when @b_up) or reset it.
 * Builds an eth_phy_cfg from mcp_info->link_input, sends it via the mailbox,
 * and then mimics a link-change attention to refresh the cached link state.
 *
 * @return status of the mailbox command.
 */
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by ecore, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
					EEE_TX_TIMER_USEC_MASK;
	}

	/* Remember whether the driver requested the link; consulted by
	 * ecore_mcp_handle_link_change() when reporting link-up
	 */
	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 *  - On reset, there's no guarantee MFW would trigger
	 *    an attention.
	 *  - On initialization, older MFWs might not indicate link change
	 *    during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return rc;
}
1450
/* Read the per-path process-kill counter from the shmem.
 *
 * NOTE(review): for VFs this returns ECORE_INVAL through a u32-valued
 * function, so callers cannot distinguish that error code from a real
 * counter value - confirm callers are PF-only before relying on it.
 */
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
	    PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}
1472
/* Handle a process-kill indication from the MFW: disable interrupts, mark a
 * recovery as in progress (once per device, by the leading HW function) and
 * schedule the OS recovery handler.
 */
static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery"
			  " process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}
1506
1507 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1508                                           struct ecore_ptt *p_ptt,
1509                                           enum MFW_DRV_MSG_TYPE type)
1510 {
1511         enum ecore_mcp_protocol_type stats_type;
1512         union ecore_mcp_protocol_stats stats;
1513         struct ecore_mcp_mb_params mb_params;
1514         u32 hsi_param;
1515         enum _ecore_status_t rc;
1516
1517         switch (type) {
1518         case MFW_DRV_MSG_GET_LAN_STATS:
1519                 stats_type = ECORE_MCP_LAN_STATS;
1520                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1521                 break;
1522         default:
1523                 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
1524                 return;
1525         }
1526
1527         OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1528
1529         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1530         mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1531         mb_params.param = hsi_param;
1532         mb_params.p_data_src = &stats;
1533         mb_params.data_src_size = sizeof(stats);
1534         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1535         if (rc != ECORE_SUCCESS)
1536                 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1537 }
1538
1539 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1540                                     struct public_func *p_shmem_info)
1541 {
1542         struct ecore_mcp_function_info *p_info;
1543
1544         p_info = &p_hwfn->mcp_info->func_info;
1545
1546         /* TODO - bandwidth min/max should have valid values of 1-100,
1547          * as well as some indication that the feature is disabled.
1548          * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
1549          * limit and correct value to min `1' and max `100' if limit isn't in
1550          * range.
1551          */
1552         p_info->bandwidth_min = (p_shmem_info->config &
1553                                  FUNC_MF_CFG_MIN_BW_MASK) >>
1554             FUNC_MF_CFG_MIN_BW_OFFSET;
1555         if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1556                 DP_INFO(p_hwfn,
1557                         "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1558                         p_info->bandwidth_min);
1559                 p_info->bandwidth_min = 1;
1560         }
1561
1562         p_info->bandwidth_max = (p_shmem_info->config &
1563                                  FUNC_MF_CFG_MAX_BW_MASK) >>
1564             FUNC_MF_CFG_MAX_BW_OFFSET;
1565         if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1566                 DP_INFO(p_hwfn,
1567                         "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1568                         p_info->bandwidth_max);
1569                 p_info->bandwidth_max = 100;
1570         }
1571 }
1572
1573 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1574                                     struct ecore_ptt *p_ptt,
1575                                     struct public_func *p_data,
1576                                     int pfid)
1577 {
1578         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1579                                         PUBLIC_FUNC);
1580         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1581         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1582         u32 i, size;
1583
1584         OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1585
1586         size = OSAL_MIN_T(u32, sizeof(*p_data),
1587                           SECTION_SIZE(mfw_path_offsize));
1588         for (i = 0; i < size / sizeof(u32); i++)
1589                 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1590                                               func_addr + (i << 2));
1591
1592         return size;
1593 }
1594
1595 static void
1596 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1597 {
1598         struct ecore_mcp_function_info *p_info;
1599         struct public_func shmem_info;
1600         u32 resp = 0, param = 0;
1601
1602         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1603
1604         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1605
1606         p_info = &p_hwfn->mcp_info->func_info;
1607
1608         ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1609
1610         ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1611
1612         /* Acknowledge the MFW */
1613         ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1614                       &param);
1615 }
1616
1617 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1618                                          struct ecore_ptt *p_ptt)
1619 {
1620         /* A single notification should be sent to upper driver in CMT mode */
1621         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1622                 return;
1623
1624         DP_NOTICE(p_hwfn, false,
1625                   "Fan failure was detected on the network interface card"
1626                   " and it's going to be shut down.\n");
1627
1628         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1629 }
1630
/* Parameters for a single mdump (MFW crash dump) mailbox transaction,
 * consumed by ecore_mcp_mdump_cmd().
 */
struct ecore_mdump_cmd_params {
	u32 cmd;		/* DRV_MSG_CODE_MDUMP_* sub-command */
	void *p_data_src;	/* optional payload sent to the MFW */
	u8 data_src_size;	/* size of the source payload, in bytes */
	void *p_data_dst;	/* optional buffer for the MFW reply data */
	u8 data_dst_size;	/* size of the destination buffer, in bytes */
	u32 mcp_resp;		/* FW_MSG_CODE_* response, set on completion */
};
1639
1640 static enum _ecore_status_t
1641 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1642                     struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1643 {
1644         struct ecore_mcp_mb_params mb_params;
1645         enum _ecore_status_t rc;
1646
1647         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1648         mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1649         mb_params.param = p_mdump_cmd_params->cmd;
1650         mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1651         mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1652         mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1653         mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1654         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1655         if (rc != ECORE_SUCCESS)
1656                 return rc;
1657
1658         p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1659
1660         if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1661                 DP_INFO(p_hwfn,
1662                         "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1663                         p_mdump_cmd_params->cmd);
1664                 rc = ECORE_NOTIMPL;
1665         } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1666                 DP_INFO(p_hwfn,
1667                         "The mdump command is not supported by the MFW\n");
1668                 rc = ECORE_NOTIMPL;
1669         }
1670
1671         return rc;
1672 }
1673
1674 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1675                                                 struct ecore_ptt *p_ptt)
1676 {
1677         struct ecore_mdump_cmd_params mdump_cmd_params;
1678
1679         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1680         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1681
1682         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1683 }
1684
1685 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1686                                                 struct ecore_ptt *p_ptt,
1687                                                 u32 epoch)
1688 {
1689         struct ecore_mdump_cmd_params mdump_cmd_params;
1690
1691         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1692         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1693         mdump_cmd_params.p_data_src = &epoch;
1694         mdump_cmd_params.data_src_size = sizeof(epoch);
1695
1696         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1697 }
1698
1699 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1700                                              struct ecore_ptt *p_ptt)
1701 {
1702         struct ecore_mdump_cmd_params mdump_cmd_params;
1703
1704         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1705         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1706
1707         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1708 }
1709
1710 static enum _ecore_status_t
1711 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1712                            struct mdump_config_stc *p_mdump_config)
1713 {
1714         struct ecore_mdump_cmd_params mdump_cmd_params;
1715         enum _ecore_status_t rc;
1716
1717         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1718         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1719         mdump_cmd_params.p_data_dst = p_mdump_config;
1720         mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1721
1722         rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1723         if (rc != ECORE_SUCCESS)
1724                 return rc;
1725
1726         if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1727                 DP_INFO(p_hwfn,
1728                         "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1729                         mdump_cmd_params.mcp_resp);
1730                 rc = ECORE_UNKNOWN_ERROR;
1731         }
1732
1733         return rc;
1734 }
1735
1736 enum _ecore_status_t
1737 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1738                          struct ecore_mdump_info *p_mdump_info)
1739 {
1740         u32 addr, global_offsize, global_addr;
1741         struct mdump_config_stc mdump_config;
1742         enum _ecore_status_t rc;
1743
1744         OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1745
1746         addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1747                                     PUBLIC_GLOBAL);
1748         global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1749         global_addr = SECTION_ADDR(global_offsize, 0);
1750         p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1751                                         global_addr +
1752                                         OFFSETOF(struct public_global,
1753                                                  mdump_reason));
1754
1755         if (p_mdump_info->reason) {
1756                 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1757                 if (rc != ECORE_SUCCESS)
1758                         return rc;
1759
1760                 p_mdump_info->version = mdump_config.version;
1761                 p_mdump_info->config = mdump_config.config;
1762                 p_mdump_info->epoch = mdump_config.epoc;
1763                 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1764                 p_mdump_info->valid_logs = mdump_config.valid_logs;
1765
1766                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1767                            "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1768                            p_mdump_info->reason, p_mdump_info->version,
1769                            p_mdump_info->config, p_mdump_info->epoch,
1770                            p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1771         } else {
1772                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1773                            "MFW mdump info: reason %d\n", p_mdump_info->reason);
1774         }
1775
1776         return ECORE_SUCCESS;
1777 }
1778
1779 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1780                                                 struct ecore_ptt *p_ptt)
1781 {
1782         struct ecore_mdump_cmd_params mdump_cmd_params;
1783
1784         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1785         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1786
1787         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1788 }
1789
1790 enum _ecore_status_t
1791 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1792                            struct ecore_mdump_retain_data *p_mdump_retain)
1793 {
1794         struct ecore_mdump_cmd_params mdump_cmd_params;
1795         struct mdump_retain_data_stc mfw_mdump_retain;
1796         enum _ecore_status_t rc;
1797
1798         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1799         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1800         mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1801         mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1802
1803         rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1804         if (rc != ECORE_SUCCESS)
1805                 return rc;
1806
1807         if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1808                 DP_INFO(p_hwfn,
1809                         "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1810                         mdump_cmd_params.mcp_resp);
1811                 return ECORE_UNKNOWN_ERROR;
1812         }
1813
1814         p_mdump_retain->valid = mfw_mdump_retain.valid;
1815         p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1816         p_mdump_retain->pf = mfw_mdump_retain.pf;
1817         p_mdump_retain->status = mfw_mdump_retain.status;
1818
1819         return ECORE_SUCCESS;
1820 }
1821
1822 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1823                                                 struct ecore_ptt *p_ptt)
1824 {
1825         struct ecore_mdump_cmd_params mdump_cmd_params;
1826
1827         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1828         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1829
1830         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1831 }
1832
1833 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1834                                             struct ecore_ptt *p_ptt)
1835 {
1836         struct ecore_mdump_retain_data mdump_retain;
1837         enum _ecore_status_t rc;
1838
1839         /* In CMT mode - no need for more than a single acknowledgment to the
1840          * MFW, and no more than a single notification to the upper driver.
1841          */
1842         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1843                 return;
1844
1845         rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1846         if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1847                 DP_NOTICE(p_hwfn, false,
1848                           "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1849                           mdump_retain.epoch, mdump_retain.pf,
1850                           mdump_retain.status);
1851         } else {
1852                 DP_NOTICE(p_hwfn, false,
1853                           "The MFW notified that a critical error occurred in the device\n");
1854         }
1855
1856         if (p_hwfn->p_dev->allow_mdump) {
1857                 DP_NOTICE(p_hwfn, false,
1858                           "Not acknowledging the notification to allow the MFW crash dump\n");
1859                 return;
1860         }
1861
1862         DP_NOTICE(p_hwfn, false,
1863                   "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1864         ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1865         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1866 }
1867
/* Process an MFW -> driver attention: read the MFW mailbox, dispatch every
 * newly-set message to its handler, and acknowledge all messages back to
 * the MFW.
 * Returns ECORE_INVAL if an unknown message type was found or if the
 * attention carried no new message at all; ECORE_SUCCESS otherwise.
 */
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		/* The byte index doubles as the MFW_DRV_MSG_* message id */
		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		/* The ack area starts right after the length dword plus the
		 * message dwords in the mailbox.
		 */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
1961
1962 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1963                                            struct ecore_ptt *p_ptt,
1964                                            u32 *p_mfw_ver,
1965                                            u32 *p_running_bundle_id)
1966 {
1967         u32 global_offsize;
1968
1969 #ifndef ASIC_ONLY
1970         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1971                 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1972                 return ECORE_SUCCESS;
1973         }
1974 #endif
1975
1976         if (IS_VF(p_hwfn->p_dev)) {
1977                 if (p_hwfn->vf_iov_info) {
1978                         struct pfvf_acquire_resp_tlv *p_resp;
1979
1980                         p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1981                         *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1982                         return ECORE_SUCCESS;
1983                 } else {
1984                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1985                                    "VF requested MFW version prior to ACQUIRE\n");
1986                         return ECORE_INVAL;
1987                 }
1988         }
1989
1990         global_offsize = ecore_rd(p_hwfn, p_ptt,
1991                                   SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1992                                                        public_base,
1993                                                        PUBLIC_GLOBAL));
1994         *p_mfw_ver =
1995             ecore_rd(p_hwfn, p_ptt,
1996                      SECTION_ADDR(global_offsize,
1997                                   0) + OFFSETOF(struct public_global, mfw_ver));
1998
1999         if (p_running_bundle_id != OSAL_NULL) {
2000                 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2001                                                 SECTION_ADDR(global_offsize,
2002                                                              0) +
2003                                                 OFFSETOF(struct public_global,
2004                                                          running_bundle_id));
2005         }
2006
2007         return ECORE_SUCCESS;
2008 }
2009
2010 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2011                                               struct ecore_ptt *p_ptt,
2012                                               u32 *p_media_type)
2013 {
2014
2015         /* TODO - Add support for VFs */
2016         if (IS_VF(p_hwfn->p_dev))
2017                 return ECORE_INVAL;
2018
2019         if (!ecore_mcp_is_init(p_hwfn)) {
2020                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
2021                 return ECORE_BUSY;
2022         }
2023
2024         if (!p_ptt) {
2025                 *p_media_type = MEDIA_UNSPECIFIED;
2026                 return ECORE_INVAL;
2027         } else {
2028                 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2029                                          p_hwfn->mcp_info->port_addr +
2030                                          OFFSETOF(struct public_port,
2031                                                   media_type));
2032         }
2033
2034         return ECORE_SUCCESS;
2035 }
2036
2037 /* @DPDK */
2038 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2039 static void
2040 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2041                                  enum ecore_pci_personality *p_proto)
2042 {
2043         *p_proto = ECORE_PCI_ETH;
2044
2045         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2046                    "According to Legacy capabilities, L2 personality is %08x\n",
2047                    (u32)*p_proto);
2048 }
2049
2050 /* @DPDK */
2051 static enum _ecore_status_t
2052 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2053                               struct ecore_ptt *p_ptt,
2054                               enum ecore_pci_personality *p_proto)
2055 {
2056         u32 resp = 0, param = 0;
2057         enum _ecore_status_t rc;
2058
2059         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2060                    "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2061                    (u32)*p_proto, resp, param);
2062         return ECORE_SUCCESS;
2063 }
2064
2065 static enum _ecore_status_t
2066 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2067                           struct public_func *p_info,
2068                           struct ecore_ptt *p_ptt,
2069                           enum ecore_pci_personality *p_proto)
2070 {
2071         enum _ecore_status_t rc = ECORE_SUCCESS;
2072
2073         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2074         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2075                 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2076                     ECORE_SUCCESS)
2077                         ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2078                 break;
2079         default:
2080                 rc = ECORE_INVAL;
2081         }
2082
2083         return rc;
2084 }
2085
2086 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2087                                                     struct ecore_ptt *p_ptt)
2088 {
2089         struct ecore_mcp_function_info *info;
2090         struct public_func shmem_info;
2091
2092         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2093         info = &p_hwfn->mcp_info->func_info;
2094
2095         info->pause_on_host = (shmem_info.config &
2096                                FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2097
2098         if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2099                                       &info->protocol)) {
2100                 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2101                        (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2102                 return ECORE_INVAL;
2103         }
2104
2105         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2106
2107         if (shmem_info.mac_upper || shmem_info.mac_lower) {
2108                 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2109                 info->mac[1] = (u8)(shmem_info.mac_upper);
2110                 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2111                 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2112                 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2113                 info->mac[5] = (u8)(shmem_info.mac_lower);
2114         } else {
2115                 /* TODO - are there protocols for which there's no MAC? */
2116                 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2117         }
2118
2119         /* TODO - are these calculations true for BE machine? */
2120         info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2121                          (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2122         info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2123                          (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2124
2125         info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2126
2127         info->mtu = (u16)shmem_info.mtu_size;
2128
2129         if (info->mtu == 0)
2130                 info->mtu = 1500;
2131
2132         info->mtu = (u16)shmem_info.mtu_size;
2133
2134         DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2135                    "Read configuration from shmem: pause_on_host %02x"
2136                     " protocol %02x BW [%02x - %02x]"
2137                     " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
2138                     " node %lx ovlan %04x\n",
2139                    info->pause_on_host, info->protocol,
2140                    info->bandwidth_min, info->bandwidth_max,
2141                    info->mac[0], info->mac[1], info->mac[2],
2142                    info->mac[3], info->mac[4], info->mac[5],
2143                    (unsigned long)info->wwn_port,
2144                    (unsigned long)info->wwn_node, info->ovlan);
2145
2146         return ECORE_SUCCESS;
2147 }
2148
2149 struct ecore_mcp_link_params
2150 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2151 {
2152         if (!p_hwfn || !p_hwfn->mcp_info)
2153                 return OSAL_NULL;
2154         return &p_hwfn->mcp_info->link_input;
2155 }
2156
2157 struct ecore_mcp_link_state
2158 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2159 {
2160         if (!p_hwfn || !p_hwfn->mcp_info)
2161                 return OSAL_NULL;
2162
2163 #ifndef ASIC_ONLY
2164         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2165                 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2166                 p_hwfn->mcp_info->link_output.link_up = true;
2167         }
2168 #endif
2169
2170         return &p_hwfn->mcp_info->link_output;
2171 }
2172
2173 struct ecore_mcp_link_capabilities
2174 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2175 {
2176         if (!p_hwfn || !p_hwfn->mcp_info)
2177                 return OSAL_NULL;
2178         return &p_hwfn->mcp_info->link_capabilities;
2179 }
2180
2181 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2182                                      struct ecore_ptt *p_ptt)
2183 {
2184         u32 resp = 0, param = 0;
2185         enum _ecore_status_t rc;
2186
2187         rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2188                            DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2189
2190         /* Wait for the drain to complete before returning */
2191         OSAL_MSLEEP(1020);
2192
2193         return rc;
2194 }
2195
2196 const struct ecore_mcp_function_info
2197 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2198 {
2199         if (!p_hwfn || !p_hwfn->mcp_info)
2200                 return OSAL_NULL;
2201         return &p_hwfn->mcp_info->func_info;
2202 }
2203
2204 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2205                                   struct ecore_ptt *p_ptt, u32 personalities)
2206 {
2207         enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2208         struct public_func shmem_info;
2209         int i, count = 0, num_pfs;
2210
2211         num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2212
2213         for (i = 0; i < num_pfs; i++) {
2214                 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2215                                          MCP_PF_ID_BY_REL(p_hwfn, i));
2216                 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2217                         continue;
2218
2219                 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2220                                               &protocol) !=
2221                     ECORE_SUCCESS)
2222                         continue;
2223
2224                 if ((1 << ((u32)protocol)) & personalities)
2225                         count++;
2226         }
2227
2228         return count;
2229 }
2230
2231 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2232                                               struct ecore_ptt *p_ptt,
2233                                               u32 *p_flash_size)
2234 {
2235         u32 flash_size;
2236
2237 #ifndef ASIC_ONLY
2238         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2239                 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2240                 return ECORE_INVAL;
2241         }
2242 #endif
2243
2244         if (IS_VF(p_hwfn->p_dev))
2245                 return ECORE_INVAL;
2246
2247         flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2248         flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2249                      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2250         flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2251
2252         *p_flash_size = flash_size;
2253
2254         return ECORE_SUCCESS;
2255 }
2256
2257 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2258                                                   struct ecore_ptt *p_ptt)
2259 {
2260         struct ecore_dev *p_dev = p_hwfn->p_dev;
2261
2262         if (p_dev->recov_in_prog) {
2263                 DP_NOTICE(p_hwfn, false,
2264                           "Avoid triggering a recovery since such a process"
2265                           " is already in progress\n");
2266                 return ECORE_AGAIN;
2267         }
2268
2269         DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2270         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2271
2272         return ECORE_SUCCESS;
2273 }
2274
2275 static enum _ecore_status_t
2276 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2277                             struct ecore_ptt *p_ptt,
2278                             u8 vf_id, u8 num)
2279 {
2280         u32 resp = 0, param = 0, rc_param = 0;
2281         enum _ecore_status_t rc;
2282
2283 /* Only Leader can configure MSIX, and need to take CMT into account */
2284
2285         if (!IS_LEAD_HWFN(p_hwfn))
2286                 return ECORE_SUCCESS;
2287         num *= p_hwfn->p_dev->num_hwfns;
2288
2289         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2290             DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2291         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2292             DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2293
2294         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2295                            &resp, &rc_param);
2296
2297         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2298                 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2299                           vf_id);
2300                 rc = ECORE_INVAL;
2301         } else {
2302                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2303                            "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2304                             num, vf_id);
2305         }
2306
2307         return rc;
2308 }
2309
2310 static enum _ecore_status_t
2311 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2312                             struct ecore_ptt *p_ptt,
2313                             u8 num)
2314 {
2315         u32 resp = 0, param = num, rc_param = 0;
2316         enum _ecore_status_t rc;
2317
2318         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2319                            param, &resp, &rc_param);
2320
2321         if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2322                 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2323                 rc = ECORE_INVAL;
2324         } else {
2325                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2326                            "Requested 0x%02x MSI-x interrupts for VFs\n",
2327                            num);
2328         }
2329
2330         return rc;
2331 }
2332
2333 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2334                                               struct ecore_ptt *p_ptt,
2335                                               u8 vf_id, u8 num)
2336 {
2337         if (ECORE_IS_BB(p_hwfn->p_dev))
2338                 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2339         else
2340                 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2341 }
2342
/* Send the driver version string/number to the MFW via the
 * DRV_MSG_CODE_SET_VERSION mailbox command. The driver name is repacked
 * into big-endian 32-bit words, as the MFW expects.
 * Returns the mailbox status; logs and propagates any failure.
 */
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                           struct ecore_mcp_drv_version *p_ver)
{
        struct ecore_mcp_mb_params mb_params;
        struct drv_version_stc drv_version;
        u32 num_words, i;
        void *p_name;
        OSAL_BE32 val;
        enum _ecore_status_t rc;

#ifndef ASIC_ONLY
        /* Emulation/FPGA has no MFW to receive the version */
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                return ECORE_SUCCESS;
#endif

        OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
        drv_version.version = p_ver->version;
        /* Copy whole u32 words only; the last 4 bytes of the name buffer
         * are not transferred.
         */
        num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
        for (i = 0; i < num_words; i++) {
                /* The driver name is expected to be in a big-endian format */
                p_name = &p_ver->name[i * sizeof(u32)];
                val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
                *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
        }

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
        mb_params.p_data_src = &drv_version;
        mb_params.data_src_size = sizeof(drv_version);
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");

        return rc;
}
2379
2380 /* A maximal 100 msec waiting time for the MCP to halt */
2381 #define ECORE_MCP_HALT_SLEEP_MS         10
2382 #define ECORE_MCP_HALT_MAX_RETRIES      10
2383
2384 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2385                                     struct ecore_ptt *p_ptt)
2386 {
2387         u32 resp = 0, param = 0, cpu_state, cnt = 0;
2388         enum _ecore_status_t rc;
2389
2390         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2391                            &param);
2392         if (rc != ECORE_SUCCESS) {
2393                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2394                 return rc;
2395         }
2396
2397         do {
2398                 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2399                 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2400                 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2401                         break;
2402         } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2403
2404         if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2405                 DP_NOTICE(p_hwfn, false,
2406                           "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2407                           ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2408                 return ECORE_BUSY;
2409         }
2410
2411         ecore_mcp_cmd_set_blocking(p_hwfn, true);
2412
2413         return ECORE_SUCCESS;
2414 }
2415
2416 #define ECORE_MCP_RESUME_SLEEP_MS       10
2417
/* Release the MCP from soft-halt: clear the CPU state, drop the
 * SOFT_HALT bit from CPU mode, and verify the MCP actually resumed.
 * On success, mailbox commands are unblocked again.
 */
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt)
{
        u32 cpu_mode, cpu_state;

        /* Clear all CPU state bits before releasing the halt */
        ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

        cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
        cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
        ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);

        /* Give the MCP a moment to come out of halt, then re-read state */
        OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
        cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

        if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
                          cpu_mode, cpu_state);
                return ECORE_BUSY;
        }

        /* The MFW serves mailbox commands again */
        ecore_mcp_cmd_set_blocking(p_hwfn, false);

        return ECORE_SUCCESS;
}
2443
2444 enum _ecore_status_t
2445 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2446                                    struct ecore_ptt *p_ptt,
2447                                    enum ecore_ov_client client)
2448 {
2449         enum _ecore_status_t rc;
2450         u32 resp = 0, param = 0;
2451         u32 drv_mb_param;
2452
2453         switch (client) {
2454         case ECORE_OV_CLIENT_DRV:
2455                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2456                 break;
2457         case ECORE_OV_CLIENT_USER:
2458                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2459                 break;
2460         case ECORE_OV_CLIENT_VENDOR_SPEC:
2461                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2462                 break;
2463         default:
2464                 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2465                 return ECORE_INVAL;
2466         }
2467
2468         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2469                            drv_mb_param, &resp, &param);
2470         if (rc != ECORE_SUCCESS)
2471                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2472
2473         return rc;
2474 }
2475
2476 enum _ecore_status_t
2477 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2478                                  struct ecore_ptt *p_ptt,
2479                                  enum ecore_ov_driver_state drv_state)
2480 {
2481         enum _ecore_status_t rc;
2482         u32 resp = 0, param = 0;
2483         u32 drv_mb_param;
2484
2485         switch (drv_state) {
2486         case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2487                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2488                 break;
2489         case ECORE_OV_DRIVER_STATE_DISABLED:
2490                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2491                 break;
2492         case ECORE_OV_DRIVER_STATE_ACTIVE:
2493                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2494                 break;
2495         default:
2496                 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2497                 return ECORE_INVAL;
2498         }
2499
2500         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2501                            drv_mb_param, &resp, &param);
2502         if (rc != ECORE_SUCCESS)
2503                 DP_ERR(p_hwfn, "Failed to send driver state\n");
2504
2505         return rc;
2506 }
2507
/* Stub: retrieving the FC NPIV table is not implemented in this driver.
 * Always returns success without writing to @p_table.
 */
enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         struct ecore_fc_npiv_tbl *p_table)
{
        return 0;
}
2514
/* Stub: reporting the MTU to the MFW is not implemented in this driver.
 * Always returns success; @mtu is ignored.
 */
enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt, u16 mtu)
{
        return 0;
}
2521
2522 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2523                                        struct ecore_ptt *p_ptt,
2524                                        enum ecore_led_mode mode)
2525 {
2526         u32 resp = 0, param = 0, drv_mb_param;
2527         enum _ecore_status_t rc;
2528
2529         switch (mode) {
2530         case ECORE_LED_MODE_ON:
2531                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2532                 break;
2533         case ECORE_LED_MODE_OFF:
2534                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2535                 break;
2536         case ECORE_LED_MODE_RESTORE:
2537                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2538                 break;
2539         default:
2540                 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2541                 return ECORE_INVAL;
2542         }
2543
2544         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2545                            drv_mb_param, &resp, &param);
2546         if (rc != ECORE_SUCCESS)
2547                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2548
2549         return rc;
2550 }
2551
2552 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2553                                              struct ecore_ptt *p_ptt,
2554                                              u32 mask_parities)
2555 {
2556         u32 resp = 0, param = 0;
2557         enum _ecore_status_t rc;
2558
2559         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2560                            mask_parities, &resp, &param);
2561
2562         if (rc != ECORE_SUCCESS) {
2563                 DP_ERR(p_hwfn,
2564                        "MCP response failure for mask parities, aborting\n");
2565         } else if (resp != FW_MSG_CODE_OK) {
2566                 DP_ERR(p_hwfn,
2567                        "MCP did not ack mask parity request. Old MFW?\n");
2568                 rc = ECORE_INVAL;
2569         }
2570
2571         return rc;
2572 }
2573
/* Read @len bytes from NVM offset @addr into @p_buf, in chunks of at
 * most MCP_DRV_NVM_BUF_LEN per mailbox command. The last mailbox
 * response code is stored in p_dev->mcp_nvm_resp for later retrieval
 * via ecore_mcp_nvm_resp().
 */
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
                                        u8 *p_buf, u32 len)
{
        struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
        u32 bytes_left, offset, bytes_to_copy, buf_size;
        u32 nvm_offset, resp, param;
        struct ecore_ptt *p_ptt;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        p_ptt = ecore_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return ECORE_BUSY;

        bytes_left = len;
        offset = 0;
        while (bytes_left > 0) {
                bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
                                           MCP_DRV_NVM_BUF_LEN);
                /* Pack the NVM address and the chunk length into one param */
                nvm_offset = (addr + offset) | (bytes_to_copy <<
                                                DRV_MB_PARAM_NVM_LEN_OFFSET);
                rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
                                          DRV_MSG_CODE_NVM_READ_NVRAM,
                                          nvm_offset, &resp, &param, &buf_size,
                                          (u32 *)(p_buf + offset));
                if (rc != ECORE_SUCCESS) {
                        DP_NOTICE(p_dev, false,
                                  "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
                                  rc);
                        resp = FW_MSG_CODE_ERROR;
                        break;
                }

                if (resp != FW_MSG_CODE_NVM_OK) {
                        DP_NOTICE(p_dev, false,
                                  "nvm read failed, resp = 0x%08x\n", resp);
                        rc = ECORE_UNKNOWN_ERROR;
                        break;
                }

                /* This can be a lengthy process, and it's possible scheduler
                 * isn't preemptible. Sleep a bit to prevent CPU hogging.
                 */
                if (bytes_left % 0x1000 <
                    (bytes_left - buf_size) % 0x1000)
                        OSAL_MSLEEP(1);

                /* buf_size is the number of bytes the MFW actually returned */
                offset += buf_size;
                bytes_left -= buf_size;
        }

        p_dev->mcp_nvm_resp = resp;
        ecore_ptt_release(p_hwfn, p_ptt);

        return rc;
}
2629
2630 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2631                                         u32 addr, u8 *p_buf, u32 len)
2632 {
2633         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2634         struct ecore_ptt *p_ptt;
2635         u32 resp, param;
2636         enum _ecore_status_t rc;
2637
2638         p_ptt = ecore_ptt_acquire(p_hwfn);
2639         if (!p_ptt)
2640                 return ECORE_BUSY;
2641
2642         rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2643                                   (cmd == ECORE_PHY_CORE_READ) ?
2644                                   DRV_MSG_CODE_PHY_CORE_READ :
2645                                   DRV_MSG_CODE_PHY_RAW_READ,
2646                                   addr, &resp, &param, &len, (u32 *)p_buf);
2647         if (rc != ECORE_SUCCESS)
2648                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2649
2650         p_dev->mcp_nvm_resp = resp;
2651         ecore_ptt_release(p_hwfn, p_ptt);
2652
2653         return rc;
2654 }
2655
2656 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2657 {
2658         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2659         struct ecore_ptt *p_ptt;
2660
2661         p_ptt = ecore_ptt_acquire(p_hwfn);
2662         if (!p_ptt)
2663                 return ECORE_BUSY;
2664
2665         OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2666         ecore_ptt_release(p_hwfn, p_ptt);
2667
2668         return ECORE_SUCCESS;
2669 }
2670
2671 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2672 {
2673         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2674         struct ecore_ptt *p_ptt;
2675         u32 resp, param;
2676         enum _ecore_status_t rc;
2677
2678         p_ptt = ecore_ptt_acquire(p_hwfn);
2679         if (!p_ptt)
2680                 return ECORE_BUSY;
2681         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
2682                            &resp, &param);
2683         p_dev->mcp_nvm_resp = resp;
2684         ecore_ptt_release(p_hwfn, p_ptt);
2685
2686         return rc;
2687 }
2688
2689 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2690                                                   u32 addr)
2691 {
2692         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2693         struct ecore_ptt *p_ptt;
2694         u32 resp, param;
2695         enum _ecore_status_t rc;
2696
2697         p_ptt = ecore_ptt_acquire(p_hwfn);
2698         if (!p_ptt)
2699                 return ECORE_BUSY;
2700         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
2701                            &resp, &param);
2702         p_dev->mcp_nvm_resp = resp;
2703         ecore_ptt_release(p_hwfn, p_ptt);
2704
2705         return rc;
2706 }
2707
2708 /* rc receives ECORE_INVAL as default parameter because
2709  * it might not enter the while loop if the len is 0
2710  */
2711 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2712                                          u32 addr, u8 *p_buf, u32 len)
2713 {
2714         u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
2715         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2716         enum _ecore_status_t rc = ECORE_INVAL;
2717         struct ecore_ptt *p_ptt;
2718
2719         p_ptt = ecore_ptt_acquire(p_hwfn);
2720         if (!p_ptt)
2721                 return ECORE_BUSY;
2722
2723         switch (cmd) {
2724         case ECORE_PUT_FILE_DATA:
2725                 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2726                 break;
2727         case ECORE_NVM_WRITE_NVRAM:
2728                 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2729                 break;
2730         case ECORE_EXT_PHY_FW_UPGRADE:
2731                 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
2732                 break;
2733         default:
2734                 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
2735                           cmd);
2736                 rc = ECORE_INVAL;
2737                 goto out;
2738         }
2739
2740         buf_idx = 0;
2741         while (buf_idx < len) {
2742                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2743                                       MCP_DRV_NVM_BUF_LEN);
2744                 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
2745                               addr) +
2746                              buf_idx;
2747                 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
2748                                           &resp, &param, buf_size,
2749                                           (u32 *)&p_buf[buf_idx]);
2750                 if (rc != ECORE_SUCCESS) {
2751                         DP_NOTICE(p_dev, false,
2752                                   "ecore_mcp_nvm_write() failed, rc = %d\n",
2753                                   rc);
2754                         resp = FW_MSG_CODE_ERROR;
2755                         break;
2756                 }
2757
2758                 if (resp != FW_MSG_CODE_OK &&
2759                     resp != FW_MSG_CODE_NVM_OK &&
2760                     resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
2761                         DP_NOTICE(p_dev, false,
2762                                   "nvm write failed, resp = 0x%08x\n", resp);
2763                         rc = ECORE_UNKNOWN_ERROR;
2764                         break;
2765                 }
2766
2767                 /* This can be a lengthy process, and it's possible scheduler
2768                  * isn't preemptible. Sleep a bit to prevent CPU hogging.
2769                  */
2770                 if (buf_idx % 0x1000 >
2771                     (buf_idx + buf_size) % 0x1000)
2772                         OSAL_MSLEEP(1);
2773
2774                 buf_idx += buf_size;
2775         }
2776
2777         p_dev->mcp_nvm_resp = resp;
2778 out:
2779         ecore_ptt_release(p_hwfn, p_ptt);
2780
2781         return rc;
2782 }
2783
2784 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2785                                          u32 addr, u8 *p_buf, u32 len)
2786 {
2787         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2788         struct ecore_ptt *p_ptt;
2789         u32 resp, param, nvm_cmd;
2790         enum _ecore_status_t rc;
2791
2792         p_ptt = ecore_ptt_acquire(p_hwfn);
2793         if (!p_ptt)
2794                 return ECORE_BUSY;
2795
2796         nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ?  DRV_MSG_CODE_PHY_CORE_WRITE :
2797                         DRV_MSG_CODE_PHY_RAW_WRITE;
2798         rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
2799                                   &resp, &param, len, (u32 *)p_buf);
2800         if (rc != ECORE_SUCCESS)
2801                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2802         p_dev->mcp_nvm_resp = resp;
2803         ecore_ptt_release(p_hwfn, p_ptt);
2804
2805         return rc;
2806 }
2807
2808 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2809                                                    u32 addr)
2810 {
2811         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2812         struct ecore_ptt *p_ptt;
2813         u32 resp, param;
2814         enum _ecore_status_t rc;
2815
2816         p_ptt = ecore_ptt_acquire(p_hwfn);
2817         if (!p_ptt)
2818                 return ECORE_BUSY;
2819
2820         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
2821                            &resp, &param);
2822         p_dev->mcp_nvm_resp = resp;
2823         ecore_ptt_release(p_hwfn, p_ptt);
2824
2825         return rc;
2826 }
2827
2828 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2829                                             struct ecore_ptt *p_ptt,
2830                                             u32 port, u32 addr, u32 offset,
2831                                             u32 len, u8 *p_buf)
2832 {
2833         u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
2834         u32 resp, param;
2835         enum _ecore_status_t rc;
2836
2837         nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2838                         (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
2839         addr = offset;
2840         offset = 0;
2841         bytes_left = len;
2842         while (bytes_left > 0) {
2843                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2844                                            MAX_I2C_TRANSACTION_SIZE);
2845                 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2846                                DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2847                 nvm_offset |= ((addr + offset) <<
2848                                 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
2849                 nvm_offset |= (bytes_to_copy <<
2850                                DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
2851                 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2852                                           DRV_MSG_CODE_TRANSCEIVER_READ,
2853                                           nvm_offset, &resp, &param, &buf_size,
2854                                           (u32 *)(p_buf + offset));
2855                 if ((resp & FW_MSG_CODE_MASK) ==
2856                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2857                         return ECORE_NODEV;
2858                 } else if ((resp & FW_MSG_CODE_MASK) !=
2859                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2860                         return ECORE_UNKNOWN_ERROR;
2861
2862                 offset += buf_size;
2863                 bytes_left -= buf_size;
2864         }
2865
2866         return ECORE_SUCCESS;
2867 }
2868
2869 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2870                                              struct ecore_ptt *p_ptt,
2871                                              u32 port, u32 addr, u32 offset,
2872                                              u32 len, u8 *p_buf)
2873 {
2874         u32 buf_idx, buf_size, nvm_offset, resp, param;
2875         enum _ecore_status_t rc;
2876
2877         nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2878                         (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
2879         buf_idx = 0;
2880         while (buf_idx < len) {
2881                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2882                                       MAX_I2C_TRANSACTION_SIZE);
2883                 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2884                                  DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2885                 nvm_offset |= ((offset + buf_idx) <<
2886                                  DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
2887                 nvm_offset |= (buf_size <<
2888                                DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
2889                 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
2890                                           DRV_MSG_CODE_TRANSCEIVER_WRITE,
2891                                           nvm_offset, &resp, &param, buf_size,
2892                                           (u32 *)&p_buf[buf_idx]);
2893                 if ((resp & FW_MSG_CODE_MASK) ==
2894                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2895                         return ECORE_NODEV;
2896                 } else if ((resp & FW_MSG_CODE_MASK) !=
2897                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2898                         return ECORE_UNKNOWN_ERROR;
2899
2900                 buf_idx += buf_size;
2901         }
2902
2903         return ECORE_SUCCESS;
2904 }
2905
2906 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2907                                          struct ecore_ptt *p_ptt,
2908                                          u16 gpio, u32 *gpio_val)
2909 {
2910         enum _ecore_status_t rc = ECORE_SUCCESS;
2911         u32 drv_mb_param = 0, rsp;
2912
2913         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
2914
2915         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
2916                            drv_mb_param, &rsp, gpio_val);
2917
2918         if (rc != ECORE_SUCCESS)
2919                 return rc;
2920
2921         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2922                 return ECORE_UNKNOWN_ERROR;
2923
2924         return ECORE_SUCCESS;
2925 }
2926
2927 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2928                                           struct ecore_ptt *p_ptt,
2929                                           u16 gpio, u16 gpio_val)
2930 {
2931         enum _ecore_status_t rc = ECORE_SUCCESS;
2932         u32 drv_mb_param = 0, param, rsp;
2933
2934         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
2935                 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
2936
2937         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2938                            drv_mb_param, &rsp, &param);
2939
2940         if (rc != ECORE_SUCCESS)
2941                 return rc;
2942
2943         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2944                 return ECORE_UNKNOWN_ERROR;
2945
2946         return ECORE_SUCCESS;
2947 }
2948
2949 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2950                                          struct ecore_ptt *p_ptt,
2951                                          u16 gpio, u32 *gpio_direction,
2952                                          u32 *gpio_ctrl)
2953 {
2954         u32 drv_mb_param = 0, rsp, val = 0;
2955         enum _ecore_status_t rc = ECORE_SUCCESS;
2956
2957         drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
2958
2959         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2960                            drv_mb_param, &rsp, &val);
2961         if (rc != ECORE_SUCCESS)
2962                 return rc;
2963
2964         *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2965                            DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
2966         *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2967                       DRV_MB_PARAM_GPIO_CTRL_OFFSET;
2968
2969         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2970                 return ECORE_UNKNOWN_ERROR;
2971
2972         return ECORE_SUCCESS;
2973 }
2974
2975 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2976                                                   struct ecore_ptt *p_ptt)
2977 {
2978         u32 drv_mb_param = 0, rsp, param;
2979         enum _ecore_status_t rc = ECORE_SUCCESS;
2980
2981         drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2982                         DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
2983
2984         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2985                            drv_mb_param, &rsp, &param);
2986
2987         if (rc != ECORE_SUCCESS)
2988                 return rc;
2989
2990         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2991             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2992                 rc = ECORE_UNKNOWN_ERROR;
2993
2994         return rc;
2995 }
2996
2997 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2998                                                struct ecore_ptt *p_ptt)
2999 {
3000         u32 drv_mb_param, rsp, param;
3001         enum _ecore_status_t rc = ECORE_SUCCESS;
3002
3003         drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3004                         DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3005
3006         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3007                            drv_mb_param, &rsp, &param);
3008
3009         if (rc != ECORE_SUCCESS)
3010                 return rc;
3011
3012         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3013             (param != DRV_MB_PARAM_BIST_RC_PASSED))
3014                 rc = ECORE_UNKNOWN_ERROR;
3015
3016         return rc;
3017 }
3018
3019 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3020         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3021 {
3022         u32 drv_mb_param = 0, rsp;
3023         enum _ecore_status_t rc = ECORE_SUCCESS;
3024
3025         drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3026                         DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3027
3028         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3029                            drv_mb_param, &rsp, num_images);
3030
3031         if (rc != ECORE_SUCCESS)
3032                 return rc;
3033
3034         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3035                 rc = ECORE_UNKNOWN_ERROR;
3036
3037         return rc;
3038 }
3039
3040 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3041         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3042         struct bist_nvm_image_att *p_image_att, u32 image_index)
3043 {
3044         u32 buf_size, nvm_offset, resp, param;
3045         enum _ecore_status_t rc;
3046
3047         nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3048                                     DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3049         nvm_offset |= (image_index <<
3050                        DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3051         rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3052                                   nvm_offset, &resp, &param, &buf_size,
3053                                   (u32 *)p_image_att);
3054         if (rc != ECORE_SUCCESS)
3055                 return rc;
3056
3057         if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3058             (p_image_att->return_code != 1))
3059                 rc = ECORE_UNKNOWN_ERROR;
3060
3061         return rc;
3062 }
3063
3064 enum _ecore_status_t
3065 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3066                                struct ecore_ptt *p_ptt,
3067                                struct ecore_temperature_info *p_temp_info)
3068 {
3069         struct ecore_temperature_sensor *p_temp_sensor;
3070         struct temperature_status_stc mfw_temp_info;
3071         struct ecore_mcp_mb_params mb_params;
3072         u32 val;
3073         enum _ecore_status_t rc;
3074         u8 i;
3075
3076         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3077         mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3078         mb_params.p_data_dst = &mfw_temp_info;
3079         mb_params.data_dst_size = sizeof(mfw_temp_info);
3080         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3081         if (rc != ECORE_SUCCESS)
3082                 return rc;
3083
3084         OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3085         p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3086                                               ECORE_MAX_NUM_OF_SENSORS);
3087         for (i = 0; i < p_temp_info->num_sensors; i++) {
3088                 val = mfw_temp_info.sensor[i];
3089                 p_temp_sensor = &p_temp_info->sensors[i];
3090                 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3091                                                  SENSOR_LOCATION_OFFSET;
3092                 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3093                                                 THRESHOLD_HIGH_OFFSET;
3094                 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3095                                           CRITICAL_TEMPERATURE_OFFSET;
3096                 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3097                                               CURRENT_TEMP_OFFSET;
3098         }
3099
3100         return ECORE_SUCCESS;
3101 }
3102
3103 enum _ecore_status_t ecore_mcp_get_mba_versions(
3104         struct ecore_hwfn *p_hwfn,
3105         struct ecore_ptt *p_ptt,
3106         struct ecore_mba_vers *p_mba_vers)
3107 {
3108         u32 buf_size, resp, param;
3109         enum _ecore_status_t rc;
3110
3111         rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3112                                   0, &resp, &param, &buf_size,
3113                                   &p_mba_vers->mba_vers[0]);
3114
3115         if (rc != ECORE_SUCCESS)
3116                 return rc;
3117
3118         if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3119                 rc = ECORE_UNKNOWN_ERROR;
3120
3121         if (buf_size != MCP_DRV_NVM_BUF_LEN)
3122                 rc = ECORE_UNKNOWN_ERROR;
3123
3124         return rc;
3125 }
3126
3127 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3128                                               struct ecore_ptt *p_ptt,
3129                                               u64 *num_events)
3130 {
3131         u32 rsp;
3132
3133         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3134                              0, &rsp, (u32 *)num_events);
3135 }
3136
3137 static enum resource_id_enum
3138 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3139 {
3140         enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3141
3142         switch (res_id) {
3143         case ECORE_SB:
3144                 mfw_res_id = RESOURCE_NUM_SB_E;
3145                 break;
3146         case ECORE_L2_QUEUE:
3147                 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3148                 break;
3149         case ECORE_VPORT:
3150                 mfw_res_id = RESOURCE_NUM_VPORT_E;
3151                 break;
3152         case ECORE_RSS_ENG:
3153                 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3154                 break;
3155         case ECORE_PQ:
3156                 mfw_res_id = RESOURCE_NUM_PQ_E;
3157                 break;
3158         case ECORE_RL:
3159                 mfw_res_id = RESOURCE_NUM_RL_E;
3160                 break;
3161         case ECORE_MAC:
3162         case ECORE_VLAN:
3163                 /* Each VFC resource can accommodate both a MAC and a VLAN */
3164                 mfw_res_id = RESOURCE_VFC_FILTER_E;
3165                 break;
3166         case ECORE_ILT:
3167                 mfw_res_id = RESOURCE_ILT_E;
3168                 break;
3169         case ECORE_LL2_QUEUE:
3170                 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3171                 break;
3172         case ECORE_RDMA_CNQ_RAM:
3173         case ECORE_CMDQS_CQS:
3174                 /* CNQ/CMDQS are the same resource */
3175                 mfw_res_id = RESOURCE_CQS_E;
3176                 break;
3177         case ECORE_RDMA_STATS_QUEUE:
3178                 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3179                 break;
3180         case ECORE_BDQ:
3181                 mfw_res_id = RESOURCE_BDQ_E;
3182                 break;
3183         default:
3184                 break;
3185         }
3186
3187         return mfw_res_id;
3188 }
3189
3190 #define ECORE_RESC_ALLOC_VERSION_MAJOR  2
3191 #define ECORE_RESC_ALLOC_VERSION_MINOR  0
3192 #define ECORE_RESC_ALLOC_VERSION                                \
3193         ((ECORE_RESC_ALLOC_VERSION_MAJOR <<                     \
3194           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |   \
3195          (ECORE_RESC_ALLOC_VERSION_MINOR <<                     \
3196           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3197
/* Input parameters for a resource-allocation mailbox exchange with the MFW */
struct ecore_resc_alloc_in_params {
	u32 cmd;		/* DRV_MSG_{GET,SET}_RESOURCE_* mailbox command */
	enum ecore_resources res_id;	/* driver-side resource identifier */
	u32 resc_max_val;	/* requested max value; used by the SET command only */
};
3203
/* Output of a resource-allocation mailbox exchange, parsed from the MFW reply */
struct ecore_resc_alloc_out_params {
	u32 mcp_resp;		/* raw mailbox response code */
	u32 mcp_param;		/* raw mailbox param (carries MFW HSI version) */
	u32 resc_num;		/* number of resource elements granted to the PF */
	u32 resc_start;		/* first resource element granted to the PF */
	u32 vf_resc_num;	/* number of elements granted per VF */
	u32 vf_resc_start;	/* first element granted to the VFs */
	u32 flags;		/* resource flags as reported by the MFW */
};
3213
3214 #define ECORE_RECOVERY_PROLOG_SLEEP_MS  100
3215
3216 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3217 {
3218         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3219         struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3220         enum _ecore_status_t rc;
3221
3222         /* Allow ongoing PCIe transactions to complete */
3223         OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3224
3225         /* Clear the PF's internal FID_enable in the PXP */
3226         rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3227         if (rc != ECORE_SUCCESS)
3228                 DP_NOTICE(p_hwfn, false,
3229                           "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
3230                           rc);
3231
3232         return rc;
3233 }
3234
/* Perform a single resource-allocation mailbox exchange with the MFW.
 * Translates the driver resource id to the MFW's enum, sends the given
 * GET/SET command together with the driver's resc-alloc HSI version, and
 * parses the MFW reply into p_out_params.
 *
 * Note: the same mfw_resc_info buffer is deliberately used as both the
 * mailbox source and destination - the MFW overwrites it with the reply.
 */
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct ecore_resc_alloc_in_params *p_in_params,
			      struct ecore_resc_alloc_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       ecore_hw_get_resc_name(p_in_params->res_id));
		return ECORE_INVAL;
	}

	/* Only the SET command carries a value towards the MFW */
	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	/* Reply is written back into the request buffer */
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd, p_in_params->res_id,
		   ecore_hw_get_resc_name(p_in_params->res_id),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num, p_out_params->resc_start,
		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
		   p_out_params->flags);

	return ECORE_SUCCESS;
}
3310
3311 enum _ecore_status_t
3312 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3313                            enum ecore_resources res_id, u32 resc_max_val,
3314                            u32 *p_mcp_resp)
3315 {
3316         struct ecore_resc_alloc_out_params out_params;
3317         struct ecore_resc_alloc_in_params in_params;
3318         enum _ecore_status_t rc;
3319
3320         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3321         in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3322         in_params.res_id = res_id;
3323         in_params.resc_max_val = resc_max_val;
3324         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3325         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3326                                            &out_params);
3327         if (rc != ECORE_SUCCESS)
3328                 return rc;
3329
3330         *p_mcp_resp = out_params.mcp_resp;
3331
3332         return ECORE_SUCCESS;
3333 }
3334
3335 enum _ecore_status_t
3336 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3337                         enum ecore_resources res_id, u32 *p_mcp_resp,
3338                         u32 *p_resc_num, u32 *p_resc_start)
3339 {
3340         struct ecore_resc_alloc_out_params out_params;
3341         struct ecore_resc_alloc_in_params in_params;
3342         enum _ecore_status_t rc;
3343
3344         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3345         in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3346         in_params.res_id = res_id;
3347         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3348         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3349                                            &out_params);
3350         if (rc != ECORE_SUCCESS)
3351                 return rc;
3352
3353         *p_mcp_resp = out_params.mcp_resp;
3354
3355         if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3356                 *p_resc_num = out_params.resc_num;
3357                 *p_resc_start = out_params.resc_start;
3358         }
3359
3360         return ECORE_SUCCESS;
3361 }
3362
3363 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3364                                                struct ecore_ptt *p_ptt)
3365 {
3366         u32 mcp_resp, mcp_param;
3367
3368         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3369                              &mcp_resp, &mcp_param);
3370 }
3371
3372 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3373                                                    struct ecore_ptt *p_ptt,
3374                                                    u32 param, u32 *p_mcp_resp,
3375                                                    u32 *p_mcp_param)
3376 {
3377         enum _ecore_status_t rc;
3378
3379         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3380                            p_mcp_resp, p_mcp_param);
3381         if (rc != ECORE_SUCCESS)
3382                 return rc;
3383
3384         if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3385                 DP_INFO(p_hwfn,
3386                         "The resource command is unsupported by the MFW\n");
3387                 return ECORE_NOTIMPL;
3388         }
3389
3390         if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3391                 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3392
3393                 DP_NOTICE(p_hwfn, false,
3394                           "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3395                           param, opcode);
3396                 return ECORE_INVAL;
3397         }
3398
3399         return rc;
3400 }
3401
/* Issue a single resource-lock request towards the MFW and parse the reply.
 * On ECORE_SUCCESS, p_params->b_granted tells whether ownership was
 * acquired and p_params->owner holds the owner reported by the MFW.
 * Returns ECORE_INVAL for an unrecognized response opcode.
 *
 * NOTE(review): p_params->timeout is overwritten (zeroed) for the special
 * DEFAULT/NONE values - callers should not rely on it afterwards.
 */
enum _ecore_status_t
__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	/* Map the requested timeout to a lock opcode; the sentinel values
	 * select the aging-less request flavors and clear the age field.
	 */
	switch (p_params->timeout) {
	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case ECORE_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
3462
3463 enum _ecore_status_t
3464 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3465                     struct ecore_resc_lock_params *p_params)
3466 {
3467         u32 retry_cnt = 0;
3468         enum _ecore_status_t rc;
3469
3470         do {
3471                 /* No need for an interval before the first iteration */
3472                 if (retry_cnt) {
3473                         if (p_params->sleep_b4_retry) {
3474                                 u16 retry_interval_in_ms =
3475                                         DIV_ROUND_UP(p_params->retry_interval,
3476                                                      1000);
3477
3478                                 OSAL_MSLEEP(retry_interval_in_ms);
3479                         } else {
3480                                 OSAL_UDELAY(p_params->retry_interval);
3481                         }
3482                 }
3483
3484                 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3485                 if (rc != ECORE_SUCCESS)
3486                         return rc;
3487
3488                 if (p_params->b_granted)
3489                         break;
3490         } while (retry_cnt++ < p_params->retry_num);
3491
3492         return ECORE_SUCCESS;
3493 }
3494
3495 void
3496 ecore_mcp_resc_lock_default_init(struct ecore_hwfn *p_hwfn,
3497                                  struct ecore_resc_lock_params *p_lock,
3498                                  struct ecore_resc_unlock_params *p_unlock,
3499                                  enum ecore_resc_lock resource,
3500                                  bool b_is_permanent)
3501 {
3502         if (p_lock != OSAL_NULL) {
3503                 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
3504
3505                 /* Permanent resources don't require aging, and there's no
3506                  * point in trying to acquire them more than once since it's
3507                  * unexpected another entity would release them.
3508                  */
3509                 if (b_is_permanent) {
3510                         p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
3511                 } else {
3512                         p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3513                         p_lock->retry_interval =
3514                                         ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3515                         p_lock->sleep_b4_retry = true;
3516                 }
3517
3518                 p_lock->resource = resource;
3519         }
3520
3521         if (p_unlock != OSAL_NULL) {
3522                 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
3523                 p_unlock->resource = resource;
3524         }
3525 }
3526
3527 enum _ecore_status_t
3528 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3529                       struct ecore_resc_unlock_params *p_params)
3530 {
3531         u32 param = 0, mcp_resp, mcp_param;
3532         u8 opcode;
3533         enum _ecore_status_t rc;
3534
3535         opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3536                                    : RESOURCE_OPCODE_RELEASE;
3537         SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3538         SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3539
3540         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3541                    "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3542                    param, opcode, p_params->resource);
3543
3544         /* Attempt to release the resource */
3545         rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3546                                     &mcp_param);
3547         if (rc != ECORE_SUCCESS)
3548                 return rc;
3549
3550         /* Analyze the response */
3551         opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3552
3553         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3554                    "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3555                    mcp_param, opcode);
3556
3557         switch (opcode) {
3558         case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3559                 DP_INFO(p_hwfn,
3560                         "Resource unlock request for an already released resource [%d]\n",
3561                         p_params->resource);
3562                 /* Fallthrough */
3563         case RESOURCE_OPCODE_RELEASED:
3564                 p_params->b_released = true;
3565                 break;
3566         case RESOURCE_OPCODE_WRONG_OWNER:
3567                 p_params->b_released = false;
3568                 break;
3569         default:
3570                 DP_NOTICE(p_hwfn, false,
3571                           "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3572                           mcp_param, opcode);
3573                 return ECORE_INVAL;
3574         }
3575
3576         return ECORE_SUCCESS;
3577 }
3578
3579 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
3580 {
3581         return !!(p_hwfn->mcp_info->capabilities &
3582                   FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3583 }
3584
3585 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
3586                                                 struct ecore_ptt *p_ptt)
3587 {
3588         u32 mcp_resp;
3589         enum _ecore_status_t rc;
3590
3591         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3592                            0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3593         if (rc == ECORE_SUCCESS)
3594                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
3595                            "MFW supported features: %08x\n",
3596                            p_hwfn->mcp_info->capabilities);
3597
3598         return rc;
3599 }
3600
3601 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
3602                                                 struct ecore_ptt *p_ptt)
3603 {
3604         u32 mcp_resp, mcp_param, features;
3605
3606         features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
3607                    DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
3608
3609         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3610                              features, &mcp_resp, &mcp_param);
3611 }