net/qede/base: add API to send STAG config update to FW
[dpdk.git] / drivers / net / qede / base / ecore_mcp.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_vf.h"
19 #include "ecore_iov_api.h"
20 #include "ecore_gtt_reg_addr.h"
21 #include "ecore_iro.h"
22 #include "ecore_dcbx.h"
23 #include "ecore_sp_commands.h"
24
25 #define CHIP_MCP_RESP_ITER_US 10
26 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
27
28 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)   /* Account for 5 sec */
29 #define ECORE_MCP_RESET_RETRIES (50 * 1000)     /* Account for 500 msec */
30
31 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
32         ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
33                  _val)
34
35 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
36         ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
37
38 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
39         DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
40                      OFFSETOF(struct public_drv_mb, _field), _val)
41
42 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
43         DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
44                      OFFSETOF(struct public_drv_mb, _field))
45
46 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
47         DRV_ID_PDA_COMP_VER_OFFSET)
48
49 #define MCP_BYTES_PER_MBIT_OFFSET 17
50
51 #ifndef ASIC_ONLY
52 static int loaded;
53 static int loaded_port[MAX_NUM_PORTS] = { 0 };
54 #endif
55
56 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
57 {
58         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
59                 return false;
60         return true;
61 }
62
63 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
64 {
65         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
66                                         PUBLIC_PORT);
67         u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
68
69         p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
70                                                    MFW_PORT(p_hwfn));
71         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
72                    "port_addr = 0x%x, port_id 0x%02x\n",
73                    p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
74 }
75
/* Copy the MFW->driver message mailbox from HW shared memory into the
 * local mfw_mb_cur shadow, converting each dword from big-endian to CPU
 * byte order.
 */
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
        OSAL_BE32 tmp;
        u32 i;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
                return;
#endif

        /* Nothing to read if the MCP was never initialized */
        if (!p_hwfn->mcp_info->public_base)
                return;

        for (i = 0; i < length; i++) {
                /* The first dword of the section holds its length, hence
                 * the extra sizeof(u32) offset past mfw_mb_addr.
                 */
                tmp = ecore_rd(p_hwfn, p_ptt,
                               p_hwfn->mcp_info->mfw_mb_addr +
                               (i << 2) + sizeof(u32));

                ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
                    OSAL_BE32_TO_CPU(tmp);
        }
}
99
/* An in-flight mailbox command, linked into mcp_info->cmd_list while a
 * response from the MFW is awaited.
 */
struct ecore_mcp_cmd_elem {
        osal_list_entry_t list;
        /* Caller's command parameters; response fields are filled in here */
        struct ecore_mcp_mb_params *p_mb_params;
        /* Driver mailbox sequence number the command was sent with */
        u16 expected_seq_num;
        /* Set once the MFW response for this command has been read */
        bool b_is_completed;
};
106
107 /* Must be called while cmd_lock is acquired */
108 static struct ecore_mcp_cmd_elem *
109 ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
110                        struct ecore_mcp_mb_params *p_mb_params,
111                        u16 expected_seq_num)
112 {
113         struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
114
115         p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
116                                  sizeof(*p_cmd_elem));
117         if (!p_cmd_elem) {
118                 DP_NOTICE(p_hwfn, false,
119                           "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
120                 goto out;
121         }
122
123         p_cmd_elem->p_mb_params = p_mb_params;
124         p_cmd_elem->expected_seq_num = expected_seq_num;
125         OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
126 out:
127         return p_cmd_elem;
128 }
129
/* Must be called while cmd_lock is acquired.
 * Unlinks a command element from the pending list and frees it.
 */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
                                   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
        OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
        OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}
137
138 /* Must be called while cmd_lock is acquired */
139 static struct ecore_mcp_cmd_elem *
140 ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
141 {
142         struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
143
144         OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
145                                  struct ecore_mcp_cmd_elem) {
146                 if (p_cmd_elem->expected_seq_num == seq_num)
147                         return p_cmd_elem;
148         }
149
150         return OSAL_NULL;
151 }
152
/* Release all MCP resources: any still-linked command elements, the MFW
 * mailbox shadow buffers, the spinlocks (when lock allocation is compiled
 * in), and finally the mcp_info structure itself. Safe to call when
 * mcp_info was never allocated (OSAL_FREE of NULL is expected to be a
 * no-op in the OSAL layer).
 */
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
        if (p_hwfn->mcp_info) {
                struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

                /* Flush any leftover pending commands under the lock */
                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
                OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
                                              &p_hwfn->mcp_info->cmd_list, list,
                                              struct ecore_mcp_cmd_elem) {
                        ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                }
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
#ifdef CONFIG_ECORE_LOCK_ALLOC
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
        }

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);

        return ECORE_SUCCESS;
}
178
/* Read the MFW shared-memory layout and populate mcp_info with the
 * addresses of the driver and MFW mailboxes, plus the current mailbox
 * sequence numbers. Returns ECORE_INVAL when no MFW is present (shared
 * memory base reads as zero, or emulation).
 */
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
                p_info->public_base = 0;
                return ECORE_INVAL;
        }
#endif

        /* A zero shared-memory address means the MFW is not running */
        p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return ECORE_INVAL;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
                   " mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address */
        mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        /* The first dword of the MFW mailbox section holds its length */
        p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
                                               p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
            DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
            DRV_PULSE_SEQ_MASK;

        /* Snapshot the MCP history register to detect later MCP resets */
        p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        return ECORE_SUCCESS;
}
232
233 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
234                                         struct ecore_ptt *p_ptt)
235 {
236         struct ecore_mcp_info *p_info;
237         u32 size;
238
239         /* Allocate mcp_info structure */
240         p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
241                                        sizeof(*p_hwfn->mcp_info));
242         if (!p_hwfn->mcp_info)
243                 goto err;
244         p_info = p_hwfn->mcp_info;
245
246         if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
247                 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
248                 /* Do not free mcp_info here, since public_base indicate that
249                  * the MCP is not initialized
250                  */
251                 return ECORE_SUCCESS;
252         }
253
254         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
255         p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
256         p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
257         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
258                 goto err;
259
260         /* Initialize the MFW spinlocks */
261 #ifdef CONFIG_ECORE_LOCK_ALLOC
262         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
263         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
264 #endif
265         OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
266         OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
267
268         OSAL_LIST_INIT(&p_info->cmd_list);
269
270         return ECORE_SUCCESS;
271
272 err:
273         DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
274         ecore_mcp_free(p_hwfn);
275         return ECORE_NOMEM;
276 }
277
/* Re-read the mailbox offsets if the MCP was reset since the last read.
 * A reset is detected by comparing the cached MCP history register value
 * against its current contents.
 */
static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        /* Use MCP history register to check if MCP reset occurred between init
         * time and now.
         */
        if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
                           p_hwfn->mcp_info->mcp_hist, generic_por_0);

                /* ecore_load_mcp_offsets() also refreshes mcp_hist */
                ecore_load_mcp_offsets(p_hwfn, p_ptt);
                ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
}
295
/* Request an MCP reset via the driver mailbox and poll the MCP history
 * register until it changes, indicating the reset completed. Returns
 * ECORE_AGAIN on timeout.
 */
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
#endif

        /* Ensure that only a single thread is accessing the mailbox */
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

        /* Snapshot the history register; it changes when the MCP resets */
        org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        /* Set drv command along with the updated sequence */
        ecore_mcp_reread_offsets(p_hwfn, p_ptt);
        seq = ++p_hwfn->mcp_info->drv_mb_seq;
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                /* Give the MFW up to 500 msec (50*1000*10usec) */
        } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
                                                MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < ECORE_MCP_RESET_RETRIES));

        if (org_mcp_reset_seq !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = ECORE_AGAIN;
        }

        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

        return rc;
}
338
339 /* Must be called while cmd_lock is acquired */
340 static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
341 {
342         struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
343
344         /* There is at most one pending command at a certain time, and if it
345          * exists - it is placed at the HEAD of the list.
346          */
347         if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
348                 p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
349                                                    struct ecore_mcp_cmd_elem,
350                                                    list);
351                 return !p_cmd_elem->b_is_completed;
352         }
353
354         return false;
355 }
356
/* Must be called while cmd_lock is acquired.
 * Checks the FW mailbox header for a new response; if its sequence number
 * matches the last sent command, copies the response (header, param and
 * union data) into the command's mb_params and marks it completed.
 * Returns ECORE_AGAIN when no new response has arrived yet.
 */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_mb_params *p_mb_params;
        struct ecore_mcp_cmd_elem *p_cmd_elem;
        u32 mcp_resp;
        u16 seq_num;

        mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
        seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

        /* Return if no new non-handled response has been received */
        if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
                return ECORE_AGAIN;

        p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
        if (!p_cmd_elem) {
                DP_ERR(p_hwfn,
                       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
                       seq_num);
                return ECORE_UNKNOWN_ERROR;
        }

        p_mb_params = p_cmd_elem->p_mb_params;

        /* Get the MFW response along with the sequence number */
        p_mb_params->mcp_resp = mcp_resp;

        /* Get the MFW param */
        p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

        /* Get the union data */
        if (p_mb_params->p_data_dst != OSAL_NULL &&
            p_mb_params->data_dst_size) {
                u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                                      OFFSETOF(struct public_drv_mb,
                                               union_data);
                ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
                                  union_data_addr, p_mb_params->data_dst_size);
        }

        p_cmd_elem->b_is_completed = true;

        return ECORE_SUCCESS;
}
403
/* Must be called while cmd_lock is acquired.
 * Writes the command's union data, param and finally the header (command
 * code OR'ed with the sequence number) to the driver mailbox. Writing the
 * header last triggers the MFW to process the command.
 */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt,
                                      struct ecore_mcp_mb_params *p_mb_params,
                                      u16 seq_num)
{
        union drv_union_data union_data;
        u32 union_data_addr;

        /* Set the union data */
        union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                          OFFSETOF(struct public_drv_mb, union_data);
        OSAL_MEM_ZERO(&union_data, sizeof(union_data));
        if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
                OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
                            p_mb_params->data_src_size);
        /* Always write the full (zero-padded) union to shared memory */
        ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
                        sizeof(union_data));

        /* Set the drv param */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

        /* Set the drv command along with the sequence number */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "MFW mailbox: command 0x%08x param 0x%08x\n",
                   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
433
/* Send a mailbox command and wait for the MFW response.
 * The flow is: (1) poll until no other command occupies the mailbox,
 * (2) send the command with a fresh sequence number under cmd_lock,
 * (3) poll until the matching response arrives. On response timeout the
 * command element is removed and ECORE_HW_ERR_MFW_RESP_FAIL is raised.
 * Note the lock hand-off: both polling loops break with cmd_lock held,
 * while loop exit via the retry counter happens with the lock released.
 */
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         struct ecore_mcp_mb_params *p_mb_params,
                         u32 max_retries, u32 delay)
{
        struct ecore_mcp_cmd_elem *p_cmd_elem;
        u32 cnt = 0;
        u16 seq_num;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* Wait until the mailbox is non-occupied */
        do {
                /* Exit the loop if there is no pending command, or if the
                 * pending command is completed during this iteration.
                 * The spinlock stays locked until the command is sent.
                 */

                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

                if (!ecore_mcp_has_pending_cmd(p_hwfn))
                        break;

                rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
                if (rc == ECORE_SUCCESS)
                        break;
                else if (rc != ECORE_AGAIN)
                        goto err;

                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
                OSAL_UDELAY(delay);
        } while (++cnt < max_retries);

        /* Reaching here via the retry counter means the lock was released
         * on the last iteration, so returning directly is safe.
         */
        if (cnt >= max_retries) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
                return ECORE_AGAIN;
        }

        /* Send the mailbox command */
        ecore_mcp_reread_offsets(p_hwfn, p_ptt);
        seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
        p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
        if (!p_cmd_elem) {
                rc = ECORE_NOMEM;
                goto err;
        }

        __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

        /* Wait for the MFW response */
        do {
                /* Exit the loop if the command is already completed, or if the
                 * command is completed during this iteration.
                 * The spinlock stays locked until the list element is removed.
                 */

                OSAL_UDELAY(delay);
                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

                if (p_cmd_elem->b_is_completed)
                        break;

                rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
                if (rc == ECORE_SUCCESS)
                        break;
                else if (rc != ECORE_AGAIN)
                        goto err;

                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
        } while (++cnt < max_retries);

        if (cnt >= max_retries) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);

                /* Remove the stale element so a later response cannot match
                 * a freed command.
                 */
                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
                ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

                ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
                return ECORE_AGAIN;
        }

        ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
                   p_mb_params->mcp_resp, p_mb_params->mcp_param,
                   (cnt * delay) / 1000, (cnt * delay) % 1000);

        /* Clear the sequence number from the MFW response */
        p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

        return ECORE_SUCCESS;

err:
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
        return rc;
}
537
/* Validate a mailbox request and dispatch it with the proper retry count
 * and polling delay. Rejects requests whose src/dst buffers are larger
 * than the mailbox union data area.
 */
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct ecore_mcp_mb_params *p_mb_params)
{
        osal_size_t union_data_size = sizeof(union drv_union_data);
        u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
        u32 delay = CHIP_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
        /* There is a built-in delay of 100usec in each MFW response read */
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                max_retries /= 10;
#endif

        /* MCP not initialized */
        if (!ecore_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
                return ECORE_BUSY;
        }

        if (p_mb_params->data_src_size > union_data_size ||
            p_mb_params->data_dst_size > union_data_size) {
                DP_ERR(p_hwfn,
                       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
                       p_mb_params->data_src_size, p_mb_params->data_dst_size,
                       union_data_size);
                return ECORE_INVAL;
        }

        return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
                                        delay);
}
573
/* Send a simple mailbox command (no union data in either direction) and
 * return the MFW response code and parameter through o_mcp_resp /
 * o_mcp_param. On emulation only the load counters are maintained.
 */
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
                                   u32 *o_mcp_resp, u32 *o_mcp_param)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
                        loaded--;
                        loaded_port[p_hwfn->port_id]--;
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
                                   loaded);
                }
                /* NOTE(review): o_mcp_resp/o_mcp_param are left untouched on
                 * the emulation path - callers see stale values there.
                 */
                return ECORE_SUCCESS;
        }
#endif

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return ECORE_SUCCESS;
}
605
/* Send a mailbox command that carries a write payload (i_buf/i_txn_size)
 * in the union data area, returning the MFW response and parameter.
 */
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 cmd,
                                          u32 param,
                                          u32 *o_mcp_resp,
                                          u32 *o_mcp_param,
                                          u32 i_txn_size, u32 *i_buf)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        mb_params.p_data_src = i_buf;
        /* NOTE(review): the u8 cast truncates i_txn_size above 255 bytes -
         * presumably callers never exceed the union data size; verify.
         */
        mb_params.data_src_size = (u8)i_txn_size;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return ECORE_SUCCESS;
}
631
/* Send a mailbox command that reads data back through the union data
 * area. The MFW reports the actual payload size in mcp_param; the copy
 * into o_buf is clamped to MCP_DRV_NVM_BUF_LEN, but *o_txn_size itself is
 * reported unclamped.
 */
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 cmd,
                                          u32 param,
                                          u32 *o_mcp_resp,
                                          u32 *o_mcp_param,
                                          u32 *o_txn_size, u32 *o_buf)
{
        struct ecore_mcp_mb_params mb_params;
        u8 raw_data[MCP_DRV_NVM_BUF_LEN];
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        mb_params.p_data_dst = raw_data;

        /* Use the maximal value since the actual one is part of the response */
        mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        *o_txn_size = *o_mcp_param;
        /* @DPDK */
        OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));

        return ECORE_SUCCESS;
}
665
#ifndef ASIC_ONLY
/* Emulation-only substitute for the MFW load-request arbitration: derive
 * the load phase (engine/port/function) from the local load counters and
 * bump them. On CMT devices the engine phase is always reported.
 */
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
                                    u32 *p_load_code)
{
        static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

        if (!loaded)
                load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
        else if (!loaded_port[p_hwfn->port_id])
                load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
        else
                load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

        /* On CMT, always tell that it's engine */
        if (p_hwfn->p_dev->num_hwfns > 1)
                load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

        *p_load_code = load_phase;
        loaded++;
        loaded_port[p_hwfn->port_id]++;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
                   *p_load_code, loaded, p_hwfn->port_id,
                   loaded_port[p_hwfn->port_id]);
}
#endif
693
694 static bool
695 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
696                          enum ecore_override_force_load override_force_load)
697 {
698         bool can_force_load = false;
699
700         switch (override_force_load) {
701         case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
702                 can_force_load = true;
703                 break;
704         case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
705                 can_force_load = false;
706                 break;
707         default:
708                 can_force_load = (drv_role == DRV_ROLE_OS &&
709                                   exist_drv_role == DRV_ROLE_PREBOOT) ||
710                                  (drv_role == DRV_ROLE_KDUMP &&
711                                   exist_drv_role == DRV_ROLE_OS);
712                 break;
713         }
714
715         return can_force_load;
716 }
717
/* Tell the MFW to cancel a previously issued load request. Failures are
 * logged and the error code is propagated to the caller.
 */
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
                                                      struct ecore_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        enum _ecore_status_t rc;

        rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
                           &resp, &param);
        if (rc != ECORE_SUCCESS)
                DP_NOTICE(p_hwfn, false,
                          "Failed to send cancel load request, rc = %d\n", rc);

        return rc;
}
732
/* Bit positions reported to the MFW for each compiled-in ecore feature */
#define CONFIG_ECORE_L2_BITMAP_IDX      (0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX   (0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX    (0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX   (0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX    (0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX   (0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX     (0x1 << 6)

/* Build a bitmap describing which ecore features this binary was
 * compiled with, based on the CONFIG_ECORE_* preprocessor switches.
 */
static u32 ecore_get_config_bitmap(void)
{
        u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
        config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
        config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
        config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
        config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
        config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
        config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
        config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

        return config_bitmap;
}
769
/* Driver-side inputs for a LOAD_REQ mailbox exchange with the MFW */
struct ecore_load_req_in_params {
        /* Requested HSI version; DEFAULT selects the current version */
        u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT  0
#define ECORE_LOAD_REQ_HSI_VER_1        1
        u32 drv_ver_0;
        u32 drv_ver_1;
        u32 fw_ver;
        /* DRV_ROLE_* value describing this driver instance */
        u8 drv_role;
        u8 timeout_val;
        u8 force_cmd;
        bool avoid_eng_reset;
};
782
/* MFW response to a LOAD_REQ, including details about an already-loaded
 * driver when one exists.
 */
struct ecore_load_req_out_params {
        u32 load_code;
        u32 exist_drv_ver_0;
        u32 exist_drv_ver_1;
        u32 exist_fw_ver;
        u8 exist_drv_role;
        u8 mfw_hsi_ver;
        /* True when another driver instance already holds the device */
        bool drv_exists;
};
792
/* Exchange a single LOAD_REQ mailbox command with the MFW.
 *
 * Builds a load_req_stc from @p_in_params, sends it as union data with
 * DRV_MSG_CODE_LOAD_REQ, and parses the load_rsp_stc returned by the MFW
 * into @p_out_params.
 *
 * Returns ECORE_SUCCESS when the mailbox exchange itself succeeded; the
 * actual load verdict is in p_out_params->load_code.
 */
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	/* Populate the request structure sent as mailbox union data */
	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	/* DEFAULT means "advertise the current HSI version" */
	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	/* The union data is not used under the legacy HSI */
	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	/* Parse the response union data, unless the MFW refused due to the
	 * legacy HSI - in that case load_rsp carries no information.
	 */
	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}
880
881 static void ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
882                                    enum ecore_drv_role drv_role,
883                                    u8 *p_mfw_drv_role)
884 {
885         switch (drv_role) {
886         case ECORE_DRV_ROLE_OS:
887                 *p_mfw_drv_role = DRV_ROLE_OS;
888                 break;
889         case ECORE_DRV_ROLE_KDUMP:
890                 *p_mfw_drv_role = DRV_ROLE_KDUMP;
891                 break;
892         }
893 }
894
/* Driver-side force-load policy; translated to the MFW LOAD_REQ_FORCE_*
 * encoding by ecore_get_mfw_force_cmd().
 */
enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};
900
901 static void ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
902                                     enum ecore_load_req_force force_cmd,
903                                     u8 *p_mfw_force_cmd)
904 {
905         switch (force_cmd) {
906         case ECORE_LOAD_REQ_FORCE_NONE:
907                 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
908                 break;
909         case ECORE_LOAD_REQ_FORCE_PF:
910                 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
911                 break;
912         case ECORE_LOAD_REQ_FORCE_ALL:
913                 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
914                 break;
915         }
916 }
917
918 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
919                                         struct ecore_ptt *p_ptt,
920                                         struct ecore_load_req_params *p_params)
921 {
922         struct ecore_load_req_out_params out_params;
923         struct ecore_load_req_in_params in_params;
924         u8 mfw_drv_role = 0, mfw_force_cmd;
925         enum _ecore_status_t rc;
926
927 #ifndef ASIC_ONLY
928         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
929                 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
930                 return ECORE_SUCCESS;
931         }
932 #endif
933
934         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
935         in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
936         in_params.drv_ver_0 = ECORE_VERSION;
937         in_params.drv_ver_1 = ecore_get_config_bitmap();
938         in_params.fw_ver = STORM_FW_VERSION;
939         ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
940         in_params.drv_role = mfw_drv_role;
941         in_params.timeout_val = p_params->timeout_val;
942         ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
943                                 &mfw_force_cmd);
944         in_params.force_cmd = mfw_force_cmd;
945         in_params.avoid_eng_reset = p_params->avoid_eng_reset;
946
947         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
948         rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
949         if (rc != ECORE_SUCCESS)
950                 return rc;
951
952         /* First handle cases where another load request should/might be sent:
953          * - MFW expects the old interface [HSI version = 1]
954          * - MFW responds that a force load request is required
955          */
956         if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
957                 DP_INFO(p_hwfn,
958                         "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
959
960                 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
961                 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
962                 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
963                                           &out_params);
964                 if (rc != ECORE_SUCCESS)
965                         return rc;
966         } else if (out_params.load_code ==
967                    FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
968                 if (ecore_mcp_can_force_load(in_params.drv_role,
969                                              out_params.exist_drv_role,
970                                              p_params->override_force_load)) {
971                         DP_INFO(p_hwfn,
972                                 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
973                                 in_params.drv_role, in_params.fw_ver,
974                                 in_params.drv_ver_0, in_params.drv_ver_1,
975                                 out_params.exist_drv_role,
976                                 out_params.exist_fw_ver,
977                                 out_params.exist_drv_ver_0,
978                                 out_params.exist_drv_ver_1);
979
980                         ecore_get_mfw_force_cmd(p_hwfn,
981                                                 ECORE_LOAD_REQ_FORCE_ALL,
982                                                 &mfw_force_cmd);
983
984                         in_params.force_cmd = mfw_force_cmd;
985                         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
986                         rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
987                                                   &out_params);
988                         if (rc != ECORE_SUCCESS)
989                                 return rc;
990                 } else {
991                         DP_NOTICE(p_hwfn, false,
992                                   "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
993                                   in_params.drv_role, in_params.fw_ver,
994                                   in_params.drv_ver_0, in_params.drv_ver_1,
995                                   out_params.exist_drv_role,
996                                   out_params.exist_fw_ver,
997                                   out_params.exist_drv_ver_0,
998                                   out_params.exist_drv_ver_1);
999
1000                         ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
1001                         return ECORE_BUSY;
1002                 }
1003         }
1004
1005         /* Now handle the other types of responses.
1006          * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1007          * expected here after the additional revised load requests were sent.
1008          */
1009         switch (out_params.load_code) {
1010         case FW_MSG_CODE_DRV_LOAD_ENGINE:
1011         case FW_MSG_CODE_DRV_LOAD_PORT:
1012         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1013                 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
1014                     out_params.drv_exists) {
1015                         /* The role and fw/driver version match, but the PF is
1016                          * already loaded and has not been unloaded gracefully.
1017                          * This is unexpected since a quasi-FLR request was
1018                          * previously sent as part of ecore_hw_prepare().
1019                          */
1020                         DP_NOTICE(p_hwfn, false,
1021                                   "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
1022                         return ECORE_INVAL;
1023                 }
1024                 break;
1025         default:
1026                 DP_NOTICE(p_hwfn, false,
1027                           "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1028                           out_params.load_code);
1029                 return ECORE_BUSY;
1030         }
1031
1032         p_params->load_code = out_params.load_code;
1033
1034         return ECORE_SUCCESS;
1035 }
1036
1037 enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
1038                                          struct ecore_ptt *p_ptt)
1039 {
1040         u32 resp = 0, param = 0;
1041         enum _ecore_status_t rc;
1042
1043         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1044                            &param);
1045         if (rc != ECORE_SUCCESS) {
1046                 DP_NOTICE(p_hwfn, false,
1047                           "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1048                 return rc;
1049         }
1050
1051 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR     (1 << 0)
1052
1053         /* Check if there is a DID mismatch between nvm-cfg/efuse */
1054         if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1055                 DP_NOTICE(p_hwfn, false,
1056                           "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1057
1058         return ECORE_SUCCESS;
1059 }
1060
1061 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
1062                                           struct ecore_ptt *p_ptt)
1063 {
1064         u32 wol_param, mcp_resp, mcp_param;
1065
1066         /* @DPDK */
1067         wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1068
1069         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1070                              &mcp_resp, &mcp_param);
1071 }
1072
1073 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1074                                            struct ecore_ptt *p_ptt)
1075 {
1076         struct ecore_mcp_mb_params mb_params;
1077         struct mcp_mac wol_mac;
1078
1079         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1080         mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1081
1082         return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1083 }
1084
/* Handle a VF-FLR notification from the MFW: read the per-path disabled-VF
 * bitmap from the public shmem and, if any VF in it is marked, kick the
 * OSAL FLR update flow.
 */
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	/* One bit per VF, 32 VFs per dword */
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	/* Read the mcp_vf_disabled bitmap dword by dword */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}
1115
/* Acknowledge to the MFW that the FLR handling of the VFs in @vfs_to_ack
 * (a VF_MAX_STATIC-bit bitmap) has completed, then clear the corresponding
 * ack bits in the function's public shmem.
 */
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	/* Send the whole bitmap as union data */
	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
				     &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		/* NOTE: any mailbox failure is reported as TIMEOUT here,
		 * masking the original rc.
		 */
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}
1155
1156 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1157                                                 struct ecore_ptt *p_ptt)
1158 {
1159         u32 transceiver_state;
1160
1161         transceiver_state = ecore_rd(p_hwfn, p_ptt,
1162                                      p_hwfn->mcp_info->port_addr +
1163                                      OFFSETOF(struct public_port,
1164                                               transceiver_data));
1165
1166         DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1167                    "Received transceiver state update [0x%08x] from mfw"
1168                    " [Addr 0x%x]\n",
1169                    transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1170                                             OFFSETOF(struct public_port,
1171                                                      transceiver_data)));
1172
1173         transceiver_state = GET_MFW_FIELD(transceiver_state,
1174                                           ETH_TRANSCEIVER_STATE);
1175
1176         if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1177                 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1178         else
1179                 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1180 }
1181
1182 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1183                                       struct ecore_ptt *p_ptt,
1184                                       struct ecore_mcp_link_state *p_link)
1185 {
1186         u32 eee_status, val;
1187
1188         p_link->eee_adv_caps = 0;
1189         p_link->eee_lp_adv_caps = 0;
1190         eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1191                                      OFFSETOF(struct public_port, eee_status));
1192         p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1193         val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1194         if (val & EEE_1G_ADV)
1195                 p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1196         if (val & EEE_10G_ADV)
1197                 p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1198         val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1199         if (val & EEE_1G_ADV)
1200                 p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1201         if (val & EEE_10G_ADV)
1202                 p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1203 }
1204
/* Process a link-change indication from the MFW (or reset the link state
 * when @b_reset is set).  Reads link_status from the port shmem, derives
 * speed/duplex/partner capabilities, re-applies min/max bandwidth limits,
 * and notifies the OSAL layer.  Serialized against concurrent callers via
 * the mcp_info link_lock.
 */
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					  OFFSETOF(struct public_port,
						   link_status)));
	} else {
		/* On reset, leave the zeroed link state as-is */
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	/* Ignore MFW link indications until the driver requested a link */
	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store total line speed as p_link->speed is
	 * again changes according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					 LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	/* Translate the partner-capability bits into the ecore encoding */
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	/* EEE state is read only when the MFW advertises the capability */
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn, p_ptt);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
1349
/* Configure the physical link via the MFW: build an eth_phy_cfg from the
 * stored link_input parameters and send INIT_PHY (@b_up) or LINK_RESET
 * (!@b_up).  Ends by mimicking a link-change attention so the link state
 * is refreshed even if the MFW does not raise one.
 */
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	/* Emulation has no real PHY/MCP to configure */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by ecore, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
	    params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
					EEE_TX_TIMER_USEC_MASK;
	}

	/* Record whether the driver requested the link up, so that link
	 * indications are honored (see ecore_mcp_handle_link_change())
	 */
	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 *  - On reset, there's no guarantee MFW would trigger
	 *    an attention.
	 *  - On initialization, older MFWs might not indicate link change
	 *    during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return rc;
}
1427
/* Read the process-kill counter from the path's public shmem.
 *
 * NOTE(review): the return type is u32, yet the VF path returns
 * ECORE_INVAL (a status code) - callers cannot distinguish that from a
 * legitimate counter value; verify against callers before relying on it.
 */
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
	    PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}
1449
/* Handle an MFW "process kill" (error recovery) indication.
 * Disables interrupts on this hwfn and, on the leading hwfn only, marks a
 * recovery as in progress and schedules the OS-specific recovery handler.
 */
static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        u32 proc_kill_cnt;

        /* Prevent possible attentions/interrupts during the recovery handling
         * and till its load phase, during which they will be re-enabled.
         */
        ecore_int_igu_disable_int(p_hwfn, p_ptt);

        DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

        /* The following operations should be done once, and thus in CMT mode
         * are carried out by only the first HW function.
         */
        if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
                return;

        /* A recovery triggered by an earlier indication is still running */
        if (p_dev->recov_in_prog) {
                DP_NOTICE(p_hwfn, false,
                          "Ignoring the indication since a recovery"
                          " process is already in progress\n");
                return;
        }

        p_dev->recov_in_prog = true;

        proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

        /* Defer the actual recovery flow to the OSAL layer */
        OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}
1483
1484 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1485                                           struct ecore_ptt *p_ptt,
1486                                           enum MFW_DRV_MSG_TYPE type)
1487 {
1488         enum ecore_mcp_protocol_type stats_type;
1489         union ecore_mcp_protocol_stats stats;
1490         struct ecore_mcp_mb_params mb_params;
1491         u32 hsi_param;
1492         enum _ecore_status_t rc;
1493
1494         switch (type) {
1495         case MFW_DRV_MSG_GET_LAN_STATS:
1496                 stats_type = ECORE_MCP_LAN_STATS;
1497                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1498                 break;
1499         default:
1500                 DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
1501                 return;
1502         }
1503
1504         OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1505
1506         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1507         mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1508         mb_params.param = hsi_param;
1509         mb_params.p_data_src = &stats;
1510         mb_params.data_src_size = sizeof(stats);
1511         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1512         if (rc != ECORE_SUCCESS)
1513                 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1514 }
1515
/* Extract the PF's min/max bandwidth limits from its shmem config word and
 * store them in mcp_info->func_info, clamping out-of-range values.
 */
static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
                                    struct public_func *p_shmem_info)
{
        struct ecore_mcp_function_info *p_info;

        p_info = &p_hwfn->mcp_info->func_info;

        /* TODO - bandwidth min/max should have valid values of 1-100,
         * as well as some indication that the feature is disabled.
         * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
         * limit and correct value to min `1' and max `100' if limit isn't in
         * range.
         */
        p_info->bandwidth_min = (p_shmem_info->config &
                                 FUNC_MF_CFG_MIN_BW_MASK) >>
            FUNC_MF_CFG_MIN_BW_OFFSET;
        if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth minimum out of bounds [%02x]. Set to 1\n",
                        p_info->bandwidth_min);
                p_info->bandwidth_min = 1;
        }

        p_info->bandwidth_max = (p_shmem_info->config &
                                 FUNC_MF_CFG_MAX_BW_MASK) >>
            FUNC_MF_CFG_MAX_BW_OFFSET;
        if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth maximum out of bounds [%02x]. Set to 100\n",
                        p_info->bandwidth_max);
                p_info->bandwidth_max = 100;
        }
}
1549
1550 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1551                                     struct ecore_ptt *p_ptt,
1552                                     struct public_func *p_data,
1553                                     int pfid)
1554 {
1555         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1556                                         PUBLIC_FUNC);
1557         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1558         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1559         u32 i, size;
1560
1561         OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1562
1563         size = OSAL_MIN_T(u32, sizeof(*p_data),
1564                           SECTION_SIZE(mfw_path_offsize));
1565         for (i = 0; i < size / sizeof(u32); i++)
1566                 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1567                                               func_addr + (i << 2));
1568
1569         return size;
1570 }
1571
1572 static void
1573 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1574 {
1575         struct ecore_mcp_function_info *p_info;
1576         struct public_func shmem_info;
1577         u32 resp = 0, param = 0;
1578
1579         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1580
1581         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1582
1583         p_info = &p_hwfn->mcp_info->func_info;
1584
1585         ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1586
1587         ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1588
1589         /* Acknowledge the MFW */
1590         ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1591                       &param);
1592 }
1593
1594 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1595                                          struct ecore_ptt *p_ptt)
1596 {
1597         /* A single notification should be sent to upper driver in CMT mode */
1598         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1599                 return;
1600
1601         DP_NOTICE(p_hwfn, false,
1602                   "Fan failure was detected on the network interface card"
1603                   " and it's going to be shut down.\n");
1604
1605         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1606 }
1607
/* Parameters for a single mdump mailbox command (DRV_MSG_CODE_MDUMP_CMD) */
struct ecore_mdump_cmd_params {
        u32 cmd;                /* mdump sub-command (DRV_MSG_CODE_MDUMP_*) */
        void *p_data_src;       /* optional data sent to the MFW */
        u8 data_src_size;       /* size of the source data in bytes */
        void *p_data_dst;       /* optional buffer filled by the MFW */
        u8 data_dst_size;       /* size of the destination buffer in bytes */
        u32 mcp_resp;           /* MFW response code (output) */
};
1616
/* Send an mdump sub-command to the MFW over the mailbox.
 * The MFW response code is stored in p_mdump_cmd_params->mcp_resp.
 * Returns ECORE_NOTIMPL if the MFW rejects the sub-command or the mdump
 * command altogether, or the mailbox error otherwise.
 */
static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
        mb_params.param = p_mdump_cmd_params->cmd;
        mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
        mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
        mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
        mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

        /* Translate the two "unsupported" MFW responses into ECORE_NOTIMPL */
        if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
                DP_INFO(p_hwfn,
                        "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
                        p_mdump_cmd_params->cmd);
                rc = ECORE_NOTIMPL;
        } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
                DP_INFO(p_hwfn,
                        "The mdump command is not supported by the MFW\n");
                rc = ECORE_NOTIMPL;
        }

        return rc;
}
1650
1651 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1652                                                 struct ecore_ptt *p_ptt)
1653 {
1654         struct ecore_mdump_cmd_params mdump_cmd_params;
1655
1656         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1657         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1658
1659         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1660 }
1661
1662 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1663                                                 struct ecore_ptt *p_ptt,
1664                                                 u32 epoch)
1665 {
1666         struct ecore_mdump_cmd_params mdump_cmd_params;
1667
1668         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1669         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1670         mdump_cmd_params.p_data_src = &epoch;
1671         mdump_cmd_params.data_src_size = sizeof(epoch);
1672
1673         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1674 }
1675
1676 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1677                                              struct ecore_ptt *p_ptt)
1678 {
1679         struct ecore_mdump_cmd_params mdump_cmd_params;
1680
1681         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1682         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1683
1684         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1685 }
1686
/* Read the current mdump configuration from the MFW into @p_mdump_config.
 * Returns ECORE_UNKNOWN_ERROR if the MFW responds with anything other than
 * FW_MSG_CODE_OK.
 */
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                           struct mdump_config_stc *p_mdump_config)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
        mdump_cmd_params.p_data_dst = p_mdump_config;
        mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);

        rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
                DP_INFO(p_hwfn,
                        "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
                        mdump_cmd_params.mcp_resp);
                rc = ECORE_UNKNOWN_ERROR;
        }

        return rc;
}
1712
/* Fill @p_mdump_info with the mdump reason read from the global shmem
 * section, and - if a dump reason is set - with the mdump configuration
 * queried from the MFW.
 */
enum _ecore_status_t
ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         struct ecore_mdump_info *p_mdump_info)
{
        u32 addr, global_offsize, global_addr;
        struct mdump_config_stc mdump_config;
        enum _ecore_status_t rc;

        OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));

        /* Locate the global section in shmem and read the mdump reason */
        addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                    PUBLIC_GLOBAL);
        global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        global_addr = SECTION_ADDR(global_offsize, 0);
        p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
                                        global_addr +
                                        OFFSETOF(struct public_global,
                                                 mdump_reason));

        /* A non-zero reason means a dump exists; fetch its details */
        if (p_mdump_info->reason) {
                rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_mdump_info->version = mdump_config.version;
                p_mdump_info->config = mdump_config.config;
                /* "epoc" is the field's spelling in the MFW HSI struct */
                p_mdump_info->epoch = mdump_config.epoc;
                p_mdump_info->num_of_logs = mdump_config.num_of_logs;
                p_mdump_info->valid_logs = mdump_config.valid_logs;

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
                           p_mdump_info->reason, p_mdump_info->version,
                           p_mdump_info->config, p_mdump_info->epoch,
                           p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
        } else {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MFW mdump info: reason %d\n", p_mdump_info->reason);
        }

        return ECORE_SUCCESS;
}
1755
1756 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1757                                                 struct ecore_ptt *p_ptt)
1758 {
1759         struct ecore_mdump_cmd_params mdump_cmd_params;
1760
1761         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1762         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1763
1764         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1765 }
1766
/* Read the mdump retained data (valid flag, epoch, pf, status) from the MFW
 * into @p_mdump_retain. Returns ECORE_UNKNOWN_ERROR on a non-OK MFW response.
 */
enum _ecore_status_t
ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                           struct ecore_mdump_retain_data *p_mdump_retain)
{
        struct ecore_mdump_cmd_params mdump_cmd_params;
        struct mdump_retain_data_stc mfw_mdump_retain;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
        mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
        mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);

        rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
        if (rc != ECORE_SUCCESS)
                return rc;

        if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
                DP_INFO(p_hwfn,
                        "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
                        mdump_cmd_params.mcp_resp);
                return ECORE_UNKNOWN_ERROR;
        }

        /* Copy from the MFW HSI struct into the ecore API struct */
        p_mdump_retain->valid = mfw_mdump_retain.valid;
        p_mdump_retain->epoch = mfw_mdump_retain.epoch;
        p_mdump_retain->pf = mfw_mdump_retain.pf;
        p_mdump_retain->status = mfw_mdump_retain.status;

        return ECORE_SUCCESS;
}
1798
1799 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1800                                                 struct ecore_ptt *p_ptt)
1801 {
1802         struct ecore_mdump_cmd_params mdump_cmd_params;
1803
1804         OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1805         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1806
1807         return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1808 }
1809
/* Handle an MFW critical-error indication. On the leading hwfn, log the
 * retained-data details when available; acknowledge the indication only when
 * mdump is not allowed (an ack prevents the MFW from taking its crash dump),
 * and notify the upper driver of the HW attention.
 */
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
                                            struct ecore_ptt *p_ptt)
{
        struct ecore_mdump_retain_data mdump_retain;
        enum _ecore_status_t rc;

        /* In CMT mode - no need for more than a single acknowledgment to the
         * MFW, and no more than a single notification to the upper driver.
         */
        if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
                return;

        rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
        if (rc == ECORE_SUCCESS && mdump_retain.valid) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
                          mdump_retain.epoch, mdump_retain.pf,
                          mdump_retain.status);
        } else {
                DP_NOTICE(p_hwfn, false,
                          "The MFW notified that a critical error occurred in the device\n");
        }

        /* Not acking keeps the MFW free to collect its own crash dump */
        if (p_hwfn->p_dev->allow_mdump) {
                DP_NOTICE(p_hwfn, false,
                          "Not acknowledging the notification to allow the MFW crash dump\n");
                return;
        }

        DP_NOTICE(p_hwfn, false,
                  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
        ecore_mcp_mdump_ack(p_hwfn, p_ptt);
        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
1844
/* Process pending MFW->driver mailbox messages: read the current mailbox,
 * dispatch a handler for every entry that changed since the shadow copy,
 * acknowledge all entries back to the MFW, and update the shadow.
 * Returns ECORE_INVAL if an unknown message was seen or no message changed.
 */
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *info = p_hwfn->mcp_info;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        bool found = false;
        u16 i;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

        /* Read Messages from MFW */
        ecore_mcp_read_mb(p_hwfn, p_ptt);

        /* Compare current messages to old ones */
        for (i = 0; i < info->mfw_mb_length; i++) {
                if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
                        continue;

                found = true;

                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
                           i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

                /* The index in the mailbox identifies the message type */
                switch (i) {
                case MFW_DRV_MSG_LINK_CHANGE:
                        ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
                        break;
                case MFW_DRV_MSG_VF_DISABLED:
                        ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_LLDP_DATA_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_REMOTE_LLDP_MIB);
                        break;
                case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_REMOTE_MIB);
                        break;
                case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_OPERATIONAL_MIB);
                        break;
                case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
                        ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_ERROR_RECOVERY:
                        ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_GET_LAN_STATS:
                case MFW_DRV_MSG_GET_FCOE_STATS:
                case MFW_DRV_MSG_GET_ISCSI_STATS:
                case MFW_DRV_MSG_GET_RDMA_STATS:
                        ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
                        break;
                case MFW_DRV_MSG_BW_UPDATE:
                        ecore_mcp_update_bw(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_FAILURE_DETECTED:
                        ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
                        ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
                        break;
                default:
                        DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
                        rc = ECORE_INVAL;
                }
        }

        /* ACK everything - the ack area follows the message dwords in the
         * mailbox (header dword + message dwords + ack dwords).
         */
        for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
                OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

                /* MFW expect answer in BE, so we force write in that format */
                ecore_wr(p_hwfn, p_ptt,
                         info->mfw_mb_addr + sizeof(u32) +
                         MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
                         sizeof(u32) + i * sizeof(u32), val);
        }

        if (!found) {
                DP_NOTICE(p_hwfn, false,
                          "Received an MFW message indication but no"
                          " new message!\n");
                rc = ECORE_INVAL;
        }

        /* Copy the new mfw messages into the shadow */
        OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

        return rc;
}
1938
/* Retrieve the MFW version (and optionally the running bundle id).
 * For a VF the version is taken from the cached ACQUIRE response; for a PF
 * it is read from the global shmem section.
 */
enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
                                           struct ecore_ptt *p_ptt,
                                           u32 *p_mfw_ver,
                                           u32 *p_running_bundle_id)
{
        u32 global_offsize;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
                return ECORE_SUCCESS;
        }
#endif

        /* VFs learn the MFW version during ACQUIRE; no shmem access needed */
        if (IS_VF(p_hwfn->p_dev)) {
                if (p_hwfn->vf_iov_info) {
                        struct pfvf_acquire_resp_tlv *p_resp;

                        p_resp = &p_hwfn->vf_iov_info->acquire_resp;
                        *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
                        return ECORE_SUCCESS;
                } else {
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                                   "VF requested MFW version prior to ACQUIRE\n");
                        return ECORE_INVAL;
                }
        }

        /* Read the version from the global shmem section */
        global_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
                                                       public_base,
                                                       PUBLIC_GLOBAL));
        *p_mfw_ver =
            ecore_rd(p_hwfn, p_ptt,
                     SECTION_ADDR(global_offsize,
                                  0) + OFFSETOF(struct public_global, mfw_ver));

        if (p_running_bundle_id != OSAL_NULL) {
                *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
                                                SECTION_ADDR(global_offsize,
                                                             0) +
                                                OFFSETOF(struct public_global,
                                                         running_bundle_id));
        }

        return ECORE_SUCCESS;
}
1986
1987 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
1988                                               struct ecore_ptt *p_ptt,
1989                                               u32 *p_media_type)
1990 {
1991
1992         /* TODO - Add support for VFs */
1993         if (IS_VF(p_hwfn->p_dev))
1994                 return ECORE_INVAL;
1995
1996         if (!ecore_mcp_is_init(p_hwfn)) {
1997                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
1998                 return ECORE_BUSY;
1999         }
2000
2001         if (!p_ptt) {
2002                 *p_media_type = MEDIA_UNSPECIFIED;
2003                 return ECORE_INVAL;
2004         } else {
2005                 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2006                                          p_hwfn->mcp_info->port_addr +
2007                                          OFFSETOF(struct public_port,
2008                                                   media_type));
2009         }
2010
2011         return ECORE_SUCCESS;
2012 }
2013
2014 /* @DPDK */
2015 /* Old MFW has a global configuration for all PFs regarding RDMA support */
/* Legacy personality resolution: old MFWs expose a single global RDMA
 * configuration, and the DPDK base driver always reports L2 (Ethernet).
 */
static void
ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
                                 enum ecore_pci_personality *p_proto)
{
        *p_proto = ECORE_PCI_ETH;

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "According to Legacy capabilities, L2 personality is %08x\n",
                   (u32)*p_proto);
}
2026
2027 /* @DPDK */
2028 static enum _ecore_status_t
2029 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2030                               struct ecore_ptt *p_ptt,
2031                               enum ecore_pci_personality *p_proto)
2032 {
2033         u32 resp = 0, param = 0;
2034         enum _ecore_status_t rc;
2035
2036         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2037                    "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2038                    (u32)*p_proto, resp, param);
2039         return ECORE_SUCCESS;
2040 }
2041
2042 static enum _ecore_status_t
2043 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2044                           struct public_func *p_info,
2045                           struct ecore_ptt *p_ptt,
2046                           enum ecore_pci_personality *p_proto)
2047 {
2048         enum _ecore_status_t rc = ECORE_SUCCESS;
2049
2050         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2051         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2052                 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2053                     ECORE_SUCCESS)
2054                         ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2055                 break;
2056         default:
2057                 rc = ECORE_INVAL;
2058         }
2059
2060         return rc;
2061 }
2062
2063 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2064                                                     struct ecore_ptt *p_ptt)
2065 {
2066         struct ecore_mcp_function_info *info;
2067         struct public_func shmem_info;
2068
2069         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2070         info = &p_hwfn->mcp_info->func_info;
2071
2072         info->pause_on_host = (shmem_info.config &
2073                                FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2074
2075         if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2076                                       &info->protocol)) {
2077                 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2078                        (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2079                 return ECORE_INVAL;
2080         }
2081
2082         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2083
2084         if (shmem_info.mac_upper || shmem_info.mac_lower) {
2085                 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2086                 info->mac[1] = (u8)(shmem_info.mac_upper);
2087                 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2088                 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2089                 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2090                 info->mac[5] = (u8)(shmem_info.mac_lower);
2091         } else {
2092                 /* TODO - are there protocols for which there's no MAC? */
2093                 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2094         }
2095
2096         /* TODO - are these calculations true for BE machine? */
2097         info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2098                          (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2099         info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2100                          (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2101
2102         info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2103
2104         info->mtu = (u16)shmem_info.mtu_size;
2105
2106         if (info->mtu == 0)
2107                 info->mtu = 1500;
2108
2109         info->mtu = (u16)shmem_info.mtu_size;
2110
2111         DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2112                    "Read configuration from shmem: pause_on_host %02x"
2113                     " protocol %02x BW [%02x - %02x]"
2114                     " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
2115                     " node %lx ovlan %04x\n",
2116                    info->pause_on_host, info->protocol,
2117                    info->bandwidth_min, info->bandwidth_max,
2118                    info->mac[0], info->mac[1], info->mac[2],
2119                    info->mac[3], info->mac[4], info->mac[5],
2120                    (unsigned long)info->wwn_port,
2121                    (unsigned long)info->wwn_node, info->ovlan);
2122
2123         return ECORE_SUCCESS;
2124 }
2125
2126 struct ecore_mcp_link_params
2127 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2128 {
2129         if (!p_hwfn || !p_hwfn->mcp_info)
2130                 return OSAL_NULL;
2131         return &p_hwfn->mcp_info->link_input;
2132 }
2133
/* Return the hwfn's current link state, or NULL if MCP info is unavailable.
 * On emulation/FPGA the link is forced up since there is no real MFW.
 */
struct ecore_mcp_link_state
*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return OSAL_NULL;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
                p_hwfn->mcp_info->link_output.link_up = true;
        }
#endif

        return &p_hwfn->mcp_info->link_output;
}
2149
2150 struct ecore_mcp_link_capabilities
2151 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2152 {
2153         if (!p_hwfn || !p_hwfn->mcp_info)
2154                 return OSAL_NULL;
2155         return &p_hwfn->mcp_info->link_capabilities;
2156 }
2157
/* Request a NIG drain from the MFW and wait for it to complete.
 * NOTE(review): the 1000 mailbox parameter presumably encodes the drain
 * duration and the 1020 msec sleep covers it with margin - confirm against
 * the MFW interface definition.
 */
enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        enum _ecore_status_t rc;

        rc = ecore_mcp_cmd(p_hwfn, p_ptt,
                           DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

        /* Wait for the drain to complete before returning */
        OSAL_MSLEEP(1020);

        return rc;
}
2172
2173 const struct ecore_mcp_function_info
2174 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2175 {
2176         if (!p_hwfn || !p_hwfn->mcp_info)
2177                 return OSAL_NULL;
2178         return &p_hwfn->mcp_info->func_info;
2179 }
2180
2181 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2182                                   struct ecore_ptt *p_ptt, u32 personalities)
2183 {
2184         enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2185         struct public_func shmem_info;
2186         int i, count = 0, num_pfs;
2187
2188         num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2189
2190         for (i = 0; i < num_pfs; i++) {
2191                 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2192                                          MCP_PF_ID_BY_REL(p_hwfn, i));
2193                 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2194                         continue;
2195
2196                 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2197                                               &protocol) !=
2198                     ECORE_SUCCESS)
2199                         continue;
2200
2201                 if ((1 << ((u32)protocol)) & personalities)
2202                         count++;
2203         }
2204
2205         return count;
2206 }
2207
/* Read the NVM flash size (in bytes) from MCP_REG_NVM_CFG4 into
 * @p_flash_size. The register holds a size exponent which is converted to
 * bytes via the Mbit-to-byte offset. Not supported for VFs or emulation.
 */
enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt,
                                              u32 *p_flash_size)
{
        u32 flash_size;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
                return ECORE_INVAL;
        }
#endif

        if (IS_VF(p_hwfn->p_dev))
                return ECORE_INVAL;

        /* Decode the size-exponent bit-field and convert it to bytes */
        flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
        flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
                     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
        flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));

        *p_flash_size = flash_size;

        return ECORE_SUCCESS;
}
2233
2234 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2235                                                   struct ecore_ptt *p_ptt)
2236 {
2237         struct ecore_dev *p_dev = p_hwfn->p_dev;
2238
2239         if (p_dev->recov_in_prog) {
2240                 DP_NOTICE(p_hwfn, false,
2241                           "Avoid triggering a recovery since such a process"
2242                           " is already in progress\n");
2243                 return ECORE_AGAIN;
2244         }
2245
2246         DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2247         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2248
2249         return ECORE_SUCCESS;
2250 }
2251
2252 static enum _ecore_status_t
2253 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2254                             struct ecore_ptt *p_ptt,
2255                             u8 vf_id, u8 num)
2256 {
2257         u32 resp = 0, param = 0, rc_param = 0;
2258         enum _ecore_status_t rc;
2259
2260 /* Only Leader can configure MSIX, and need to take CMT into account */
2261
2262         if (!IS_LEAD_HWFN(p_hwfn))
2263                 return ECORE_SUCCESS;
2264         num *= p_hwfn->p_dev->num_hwfns;
2265
2266         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2267             DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2268         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2269             DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2270
2271         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2272                            &resp, &rc_param);
2273
2274         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2275                 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2276                           vf_id);
2277                 rc = ECORE_INVAL;
2278         } else {
2279                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2280                            "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2281                             num, vf_id);
2282         }
2283
2284         return rc;
2285 }
2286
2287 static enum _ecore_status_t
2288 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2289                             struct ecore_ptt *p_ptt,
2290                             u8 num)
2291 {
2292         u32 resp = 0, param = num, rc_param = 0;
2293         enum _ecore_status_t rc;
2294
2295         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2296                            param, &resp, &rc_param);
2297
2298         if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2299                 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2300                 rc = ECORE_INVAL;
2301         } else {
2302                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2303                            "Requested 0x%02x MSI-x interrupts for VFs\n",
2304                            num);
2305         }
2306
2307         return rc;
2308 }
2309
2310 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2311                                               struct ecore_ptt *p_ptt,
2312                                               u8 vf_id, u8 num)
2313 {
2314         if (ECORE_IS_BB(p_hwfn->p_dev))
2315                 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2316         else
2317                 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2318 }
2319
2320 enum _ecore_status_t
2321 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2322                            struct ecore_mcp_drv_version *p_ver)
2323 {
2324         struct ecore_mcp_mb_params mb_params;
2325         struct drv_version_stc drv_version;
2326         u32 num_words, i;
2327         void *p_name;
2328         OSAL_BE32 val;
2329         enum _ecore_status_t rc;
2330
2331 #ifndef ASIC_ONLY
2332         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2333                 return ECORE_SUCCESS;
2334 #endif
2335
2336         OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2337         drv_version.version = p_ver->version;
2338         num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2339         for (i = 0; i < num_words; i++) {
2340                 /* The driver name is expected to be in a big-endian format */
2341                 p_name = &p_ver->name[i * sizeof(u32)];
2342                 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2343                 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2344         }
2345
2346         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2347         mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2348         mb_params.p_data_src = &drv_version;
2349         mb_params.data_src_size = sizeof(drv_version);
2350         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2351         if (rc != ECORE_SUCCESS)
2352                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2353
2354         return rc;
2355 }
2356
2357 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2358                                     struct ecore_ptt *p_ptt)
2359 {
2360         enum _ecore_status_t rc;
2361         u32 resp = 0, param = 0;
2362
2363         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2364                            &param);
2365         if (rc != ECORE_SUCCESS)
2366                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2367
2368         return rc;
2369 }
2370
2371 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2372                                       struct ecore_ptt *p_ptt)
2373 {
2374         u32 value, cpu_mode;
2375
2376         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2377
2378         value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2379         value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2380         ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2381         cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2382
2383         return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
2384 }
2385
2386 enum _ecore_status_t
2387 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2388                                    struct ecore_ptt *p_ptt,
2389                                    enum ecore_ov_client client)
2390 {
2391         enum _ecore_status_t rc;
2392         u32 resp = 0, param = 0;
2393         u32 drv_mb_param;
2394
2395         switch (client) {
2396         case ECORE_OV_CLIENT_DRV:
2397                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2398                 break;
2399         case ECORE_OV_CLIENT_USER:
2400                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2401                 break;
2402         case ECORE_OV_CLIENT_VENDOR_SPEC:
2403                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2404                 break;
2405         default:
2406                 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
2407                 return ECORE_INVAL;
2408         }
2409
2410         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2411                            drv_mb_param, &resp, &param);
2412         if (rc != ECORE_SUCCESS)
2413                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2414
2415         return rc;
2416 }
2417
2418 enum _ecore_status_t
2419 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2420                                  struct ecore_ptt *p_ptt,
2421                                  enum ecore_ov_driver_state drv_state)
2422 {
2423         enum _ecore_status_t rc;
2424         u32 resp = 0, param = 0;
2425         u32 drv_mb_param;
2426
2427         switch (drv_state) {
2428         case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2429                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2430                 break;
2431         case ECORE_OV_DRIVER_STATE_DISABLED:
2432                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2433                 break;
2434         case ECORE_OV_DRIVER_STATE_ACTIVE:
2435                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2436                 break;
2437         default:
2438                 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
2439                 return ECORE_INVAL;
2440         }
2441
2442         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2443                            drv_mb_param, &resp, &param);
2444         if (rc != ECORE_SUCCESS)
2445                 DP_ERR(p_hwfn, "Failed to send driver state\n");
2446
2447         return rc;
2448 }
2449
2450 enum _ecore_status_t
2451 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2452                          struct ecore_fc_npiv_tbl *p_table)
2453 {
2454         return 0;
2455 }
2456
2457 enum _ecore_status_t
2458 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
2459                         struct ecore_ptt *p_ptt, u16 mtu)
2460 {
2461         return 0;
2462 }
2463
2464 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2465                                        struct ecore_ptt *p_ptt,
2466                                        enum ecore_led_mode mode)
2467 {
2468         u32 resp = 0, param = 0, drv_mb_param;
2469         enum _ecore_status_t rc;
2470
2471         switch (mode) {
2472         case ECORE_LED_MODE_ON:
2473                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2474                 break;
2475         case ECORE_LED_MODE_OFF:
2476                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2477                 break;
2478         case ECORE_LED_MODE_RESTORE:
2479                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2480                 break;
2481         default:
2482                 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2483                 return ECORE_INVAL;
2484         }
2485
2486         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2487                            drv_mb_param, &resp, &param);
2488         if (rc != ECORE_SUCCESS)
2489                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2490
2491         return rc;
2492 }
2493
2494 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2495                                              struct ecore_ptt *p_ptt,
2496                                              u32 mask_parities)
2497 {
2498         u32 resp = 0, param = 0;
2499         enum _ecore_status_t rc;
2500
2501         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2502                            mask_parities, &resp, &param);
2503
2504         if (rc != ECORE_SUCCESS) {
2505                 DP_ERR(p_hwfn,
2506                        "MCP response failure for mask parities, aborting\n");
2507         } else if (resp != FW_MSG_CODE_OK) {
2508                 DP_ERR(p_hwfn,
2509                        "MCP did not ack mask parity request. Old MFW?\n");
2510                 rc = ECORE_INVAL;
2511         }
2512
2513         return rc;
2514 }
2515
/* Read "len" bytes from the NVM starting at "addr" into "p_buf", in
 * chunks of up to MCP_DRV_NVM_BUF_LEN bytes per MFW mailbox transaction.
 * The last MFW response is cached in p_dev->mcp_nvm_resp for retrieval
 * via ecore_mcp_nvm_resp().
 */
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	u32 nvm_offset, resp, param;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	bytes_left = len;
	offset = 0;
	while (bytes_left > 0) {
		/* The chunk length is encoded in the upper bits of the
		 * mailbox param, alongside the absolute NVM offset.
		 */
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		nvm_offset = (addr + offset) | (bytes_to_copy <<
						DRV_MB_PARAM_NVM_LEN_OFFSET);
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					  DRV_MSG_CODE_NVM_READ_NVRAM,
					  nvm_offset, &resp, &param, &buf_size,
					  (u32 *)(p_buf + offset));
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
				  rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_NVM_OK) {
			DP_NOTICE(p_dev, false,
				  "nvm read failed, resp = 0x%08x\n", resp);
			rc = ECORE_UNKNOWN_ERROR;
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		/* Advance by what the MFW actually returned (buf_size), which
		 * may differ from the requested chunk size.
		 */
		offset += buf_size;
		bytes_left -= buf_size;
	}

	/* Keep the last response for ecore_mcp_nvm_resp() */
	p_dev->mcp_nvm_resp = resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
2571
2572 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2573                                         u32 addr, u8 *p_buf, u32 len)
2574 {
2575         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2576         struct ecore_ptt *p_ptt;
2577         u32 resp, param;
2578         enum _ecore_status_t rc;
2579
2580         p_ptt = ecore_ptt_acquire(p_hwfn);
2581         if (!p_ptt)
2582                 return ECORE_BUSY;
2583
2584         rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2585                                   (cmd == ECORE_PHY_CORE_READ) ?
2586                                   DRV_MSG_CODE_PHY_CORE_READ :
2587                                   DRV_MSG_CODE_PHY_RAW_READ,
2588                                   addr, &resp, &param, &len, (u32 *)p_buf);
2589         if (rc != ECORE_SUCCESS)
2590                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2591
2592         p_dev->mcp_nvm_resp = resp;
2593         ecore_ptt_release(p_hwfn, p_ptt);
2594
2595         return rc;
2596 }
2597
2598 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2599 {
2600         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2601         struct ecore_ptt *p_ptt;
2602
2603         p_ptt = ecore_ptt_acquire(p_hwfn);
2604         if (!p_ptt)
2605                 return ECORE_BUSY;
2606
2607         OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2608         ecore_ptt_release(p_hwfn, p_ptt);
2609
2610         return ECORE_SUCCESS;
2611 }
2612
2613 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
2614 {
2615         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2616         struct ecore_ptt *p_ptt;
2617         u32 resp, param;
2618         enum _ecore_status_t rc;
2619
2620         p_ptt = ecore_ptt_acquire(p_hwfn);
2621         if (!p_ptt)
2622                 return ECORE_BUSY;
2623         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
2624                            &resp, &param);
2625         p_dev->mcp_nvm_resp = resp;
2626         ecore_ptt_release(p_hwfn, p_ptt);
2627
2628         return rc;
2629 }
2630
2631 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2632                                                   u32 addr)
2633 {
2634         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2635         struct ecore_ptt *p_ptt;
2636         u32 resp, param;
2637         enum _ecore_status_t rc;
2638
2639         p_ptt = ecore_ptt_acquire(p_hwfn);
2640         if (!p_ptt)
2641                 return ECORE_BUSY;
2642         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
2643                            &resp, &param);
2644         p_dev->mcp_nvm_resp = resp;
2645         ecore_ptt_release(p_hwfn, p_ptt);
2646
2647         return rc;
2648 }
2649
/* rc receives ECORE_INVAL as default parameter because
 * it might not enter the while loop if the len is 0
 */
/* Write "len" bytes from "p_buf" to the NVM at "addr", in chunks of up to
 * MCP_DRV_NVM_BUF_LEN bytes per mailbox transaction. "cmd" selects the
 * write flavor (file data, raw NVRAM, or external PHY FW upgrade). The
 * last MFW response is cached in p_dev->mcp_nvm_resp.
 */
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	/* Translate the ecore-level command into the MFW mailbox opcode */
	switch (cmd) {
	case ECORE_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case ECORE_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	case ECORE_EXT_PHY_FW_UPGRADE:
		nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
			  cmd);
		rc = ECORE_INVAL;
		goto out;
	}

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MCP_DRV_NVM_BUF_LEN);
		/* The chunk length rides in the upper bits of the param,
		 * alongside the absolute NVM offset of this chunk.
		 */
		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
			      addr) +
			     buf_idx;
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					  &resp, &param, buf_size,
					  (u32 *)&p_buf[buf_idx]);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "ecore_mcp_nvm_write() failed, rc = %d\n",
				  rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(p_dev, false,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = ECORE_UNKNOWN_ERROR;
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (buf_idx % 0x1000 >
		    (buf_idx + buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		buf_idx += buf_size;
	}

	/* Keep the last response for ecore_mcp_nvm_resp() */
	p_dev->mcp_nvm_resp = resp;
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
2725
2726 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2727                                          u32 addr, u8 *p_buf, u32 len)
2728 {
2729         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2730         struct ecore_ptt *p_ptt;
2731         u32 resp, param, nvm_cmd;
2732         enum _ecore_status_t rc;
2733
2734         p_ptt = ecore_ptt_acquire(p_hwfn);
2735         if (!p_ptt)
2736                 return ECORE_BUSY;
2737
2738         nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ?  DRV_MSG_CODE_PHY_CORE_WRITE :
2739                         DRV_MSG_CODE_PHY_RAW_WRITE;
2740         rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
2741                                   &resp, &param, len, (u32 *)p_buf);
2742         if (rc != ECORE_SUCCESS)
2743                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2744         p_dev->mcp_nvm_resp = resp;
2745         ecore_ptt_release(p_hwfn, p_ptt);
2746
2747         return rc;
2748 }
2749
2750 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
2751                                                    u32 addr)
2752 {
2753         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2754         struct ecore_ptt *p_ptt;
2755         u32 resp, param;
2756         enum _ecore_status_t rc;
2757
2758         p_ptt = ecore_ptt_acquire(p_hwfn);
2759         if (!p_ptt)
2760                 return ECORE_BUSY;
2761
2762         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
2763                            &resp, &param);
2764         p_dev->mcp_nvm_resp = resp;
2765         ecore_ptt_release(p_hwfn, p_ptt);
2766
2767         return rc;
2768 }
2769
2770 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
2771                                             struct ecore_ptt *p_ptt,
2772                                             u32 port, u32 addr, u32 offset,
2773                                             u32 len, u8 *p_buf)
2774 {
2775         u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
2776         u32 resp, param;
2777         enum _ecore_status_t rc;
2778
2779         nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2780                         (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
2781         addr = offset;
2782         offset = 0;
2783         bytes_left = len;
2784         while (bytes_left > 0) {
2785                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2786                                            MAX_I2C_TRANSACTION_SIZE);
2787                 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2788                                DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2789                 nvm_offset |= ((addr + offset) <<
2790                                 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
2791                 nvm_offset |= (bytes_to_copy <<
2792                                DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
2793                 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2794                                           DRV_MSG_CODE_TRANSCEIVER_READ,
2795                                           nvm_offset, &resp, &param, &buf_size,
2796                                           (u32 *)(p_buf + offset));
2797                 if ((resp & FW_MSG_CODE_MASK) ==
2798                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2799                         return ECORE_NODEV;
2800                 } else if ((resp & FW_MSG_CODE_MASK) !=
2801                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2802                         return ECORE_UNKNOWN_ERROR;
2803
2804                 offset += buf_size;
2805                 bytes_left -= buf_size;
2806         }
2807
2808         return ECORE_SUCCESS;
2809 }
2810
2811 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
2812                                              struct ecore_ptt *p_ptt,
2813                                              u32 port, u32 addr, u32 offset,
2814                                              u32 len, u8 *p_buf)
2815 {
2816         u32 buf_idx, buf_size, nvm_offset, resp, param;
2817         enum _ecore_status_t rc;
2818
2819         nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
2820                         (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
2821         buf_idx = 0;
2822         while (buf_idx < len) {
2823                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2824                                       MAX_I2C_TRANSACTION_SIZE);
2825                 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2826                                  DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2827                 nvm_offset |= ((offset + buf_idx) <<
2828                                  DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
2829                 nvm_offset |= (buf_size <<
2830                                DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
2831                 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
2832                                           DRV_MSG_CODE_TRANSCEIVER_WRITE,
2833                                           nvm_offset, &resp, &param, buf_size,
2834                                           (u32 *)&p_buf[buf_idx]);
2835                 if ((resp & FW_MSG_CODE_MASK) ==
2836                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
2837                         return ECORE_NODEV;
2838                 } else if ((resp & FW_MSG_CODE_MASK) !=
2839                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2840                         return ECORE_UNKNOWN_ERROR;
2841
2842                 buf_idx += buf_size;
2843         }
2844
2845         return ECORE_SUCCESS;
2846 }
2847
2848 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
2849                                          struct ecore_ptt *p_ptt,
2850                                          u16 gpio, u32 *gpio_val)
2851 {
2852         enum _ecore_status_t rc = ECORE_SUCCESS;
2853         u32 drv_mb_param = 0, rsp;
2854
2855         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
2856
2857         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
2858                            drv_mb_param, &rsp, gpio_val);
2859
2860         if (rc != ECORE_SUCCESS)
2861                 return rc;
2862
2863         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2864                 return ECORE_UNKNOWN_ERROR;
2865
2866         return ECORE_SUCCESS;
2867 }
2868
2869 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
2870                                           struct ecore_ptt *p_ptt,
2871                                           u16 gpio, u16 gpio_val)
2872 {
2873         enum _ecore_status_t rc = ECORE_SUCCESS;
2874         u32 drv_mb_param = 0, param, rsp;
2875
2876         drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
2877                 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
2878
2879         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
2880                            drv_mb_param, &rsp, &param);
2881
2882         if (rc != ECORE_SUCCESS)
2883                 return rc;
2884
2885         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2886                 return ECORE_UNKNOWN_ERROR;
2887
2888         return ECORE_SUCCESS;
2889 }
2890
2891 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
2892                                          struct ecore_ptt *p_ptt,
2893                                          u16 gpio, u32 *gpio_direction,
2894                                          u32 *gpio_ctrl)
2895 {
2896         u32 drv_mb_param = 0, rsp, val = 0;
2897         enum _ecore_status_t rc = ECORE_SUCCESS;
2898
2899         drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
2900
2901         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
2902                            drv_mb_param, &rsp, &val);
2903         if (rc != ECORE_SUCCESS)
2904                 return rc;
2905
2906         *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
2907                            DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
2908         *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
2909                       DRV_MB_PARAM_GPIO_CTRL_OFFSET;
2910
2911         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
2912                 return ECORE_UNKNOWN_ERROR;
2913
2914         return ECORE_SUCCESS;
2915 }
2916
2917 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
2918                                                   struct ecore_ptt *p_ptt)
2919 {
2920         u32 drv_mb_param = 0, rsp, param;
2921         enum _ecore_status_t rc = ECORE_SUCCESS;
2922
2923         drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2924                         DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
2925
2926         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2927                            drv_mb_param, &rsp, &param);
2928
2929         if (rc != ECORE_SUCCESS)
2930                 return rc;
2931
2932         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2933             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2934                 rc = ECORE_UNKNOWN_ERROR;
2935
2936         return rc;
2937 }
2938
2939 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
2940                                                struct ecore_ptt *p_ptt)
2941 {
2942         u32 drv_mb_param, rsp, param;
2943         enum _ecore_status_t rc = ECORE_SUCCESS;
2944
2945         drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
2946                         DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
2947
2948         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2949                            drv_mb_param, &rsp, &param);
2950
2951         if (rc != ECORE_SUCCESS)
2952                 return rc;
2953
2954         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2955             (param != DRV_MB_PARAM_BIST_RC_PASSED))
2956                 rc = ECORE_UNKNOWN_ERROR;
2957
2958         return rc;
2959 }
2960
2961 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
2962         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
2963 {
2964         u32 drv_mb_param = 0, rsp;
2965         enum _ecore_status_t rc = ECORE_SUCCESS;
2966
2967         drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
2968                         DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
2969
2970         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2971                            drv_mb_param, &rsp, num_images);
2972
2973         if (rc != ECORE_SUCCESS)
2974                 return rc;
2975
2976         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
2977                 rc = ECORE_UNKNOWN_ERROR;
2978
2979         return rc;
2980 }
2981
2982 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
2983         struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2984         struct bist_nvm_image_att *p_image_att, u32 image_index)
2985 {
2986         u32 buf_size, nvm_offset, resp, param;
2987         enum _ecore_status_t rc;
2988
2989         nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
2990                                     DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
2991         nvm_offset |= (image_index <<
2992                        DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
2993         rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2994                                   nvm_offset, &resp, &param, &buf_size,
2995                                   (u32 *)p_image_att);
2996         if (rc != ECORE_SUCCESS)
2997                 return rc;
2998
2999         if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3000             (p_image_att->return_code != 1))
3001                 rc = ECORE_UNKNOWN_ERROR;
3002
3003         return rc;
3004 }
3005
3006 enum _ecore_status_t
3007 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3008                                struct ecore_ptt *p_ptt,
3009                                struct ecore_temperature_info *p_temp_info)
3010 {
3011         struct ecore_temperature_sensor *p_temp_sensor;
3012         struct temperature_status_stc mfw_temp_info;
3013         struct ecore_mcp_mb_params mb_params;
3014         u32 val;
3015         enum _ecore_status_t rc;
3016         u8 i;
3017
3018         OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3019         mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3020         mb_params.p_data_dst = &mfw_temp_info;
3021         mb_params.data_dst_size = sizeof(mfw_temp_info);
3022         rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3023         if (rc != ECORE_SUCCESS)
3024                 return rc;
3025
3026         OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3027         p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3028                                               ECORE_MAX_NUM_OF_SENSORS);
3029         for (i = 0; i < p_temp_info->num_sensors; i++) {
3030                 val = mfw_temp_info.sensor[i];
3031                 p_temp_sensor = &p_temp_info->sensors[i];
3032                 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3033                                                  SENSOR_LOCATION_OFFSET;
3034                 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3035                                                 THRESHOLD_HIGH_OFFSET;
3036                 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3037                                           CRITICAL_TEMPERATURE_OFFSET;
3038                 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3039                                               CURRENT_TEMP_OFFSET;
3040         }
3041
3042         return ECORE_SUCCESS;
3043 }
3044
3045 enum _ecore_status_t ecore_mcp_get_mba_versions(
3046         struct ecore_hwfn *p_hwfn,
3047         struct ecore_ptt *p_ptt,
3048         struct ecore_mba_vers *p_mba_vers)
3049 {
3050         u32 buf_size, resp, param;
3051         enum _ecore_status_t rc;
3052
3053         rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3054                                   0, &resp, &param, &buf_size,
3055                                   &p_mba_vers->mba_vers[0]);
3056
3057         if (rc != ECORE_SUCCESS)
3058                 return rc;
3059
3060         if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3061                 rc = ECORE_UNKNOWN_ERROR;
3062
3063         if (buf_size != MCP_DRV_NVM_BUF_LEN)
3064                 rc = ECORE_UNKNOWN_ERROR;
3065
3066         return rc;
3067 }
3068
/* Query the MFW for the number of memory ECC events.
 *
 * NOTE(review): @num_events is a u64, but the mailbox param written through
 * the casted pointer is 32 bits wide, so the upper 32 bits of *num_events
 * are presumably left untouched -- callers likely must pre-zero it; TODO
 * confirm against the call sites.
 */
enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}
3078
3079 static enum resource_id_enum
3080 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3081 {
3082         enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3083
3084         switch (res_id) {
3085         case ECORE_SB:
3086                 mfw_res_id = RESOURCE_NUM_SB_E;
3087                 break;
3088         case ECORE_L2_QUEUE:
3089                 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3090                 break;
3091         case ECORE_VPORT:
3092                 mfw_res_id = RESOURCE_NUM_VPORT_E;
3093                 break;
3094         case ECORE_RSS_ENG:
3095                 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3096                 break;
3097         case ECORE_PQ:
3098                 mfw_res_id = RESOURCE_NUM_PQ_E;
3099                 break;
3100         case ECORE_RL:
3101                 mfw_res_id = RESOURCE_NUM_RL_E;
3102                 break;
3103         case ECORE_MAC:
3104         case ECORE_VLAN:
3105                 /* Each VFC resource can accommodate both a MAC and a VLAN */
3106                 mfw_res_id = RESOURCE_VFC_FILTER_E;
3107                 break;
3108         case ECORE_ILT:
3109                 mfw_res_id = RESOURCE_ILT_E;
3110                 break;
3111         case ECORE_LL2_QUEUE:
3112                 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3113                 break;
3114         case ECORE_RDMA_CNQ_RAM:
3115         case ECORE_CMDQS_CQS:
3116                 /* CNQ/CMDQS are the same resource */
3117                 mfw_res_id = RESOURCE_CQS_E;
3118                 break;
3119         case ECORE_RDMA_STATS_QUEUE:
3120                 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3121                 break;
3122         case ECORE_BDQ:
3123                 mfw_res_id = RESOURCE_BDQ_E;
3124                 break;
3125         default:
3126                 break;
3127         }
3128
3129         return mfw_res_id;
3130 }
3131
/* HSI version of the resource-allocation messages, encoded into the
 * mailbox param field so the MFW can validate compatibility.
 */
#define ECORE_RESC_ALLOC_VERSION_MAJOR	2
#define ECORE_RESC_ALLOC_VERSION_MINOR	0
#define ECORE_RESC_ALLOC_VERSION				\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))

/* Driver-side input for a resource-allocation mailbox command */
struct ecore_resc_alloc_in_params {
	u32 cmd;			/* DRV_MSG_{GET,SET}_RESOURCE_* */
	enum ecore_resources res_id;	/* driver resource identifier */
	u32 resc_max_val;		/* value to set (SET command only) */
};

/* Parsed MFW reply to a resource-allocation mailbox command */
struct ecore_resc_alloc_out_params {
	u32 mcp_resp;		/* raw mailbox response code */
	u32 mcp_param;		/* raw mailbox response param */
	u32 resc_num;		/* resource_info.size from the MFW */
	u32 resc_start;		/* resource_info.offset from the MFW */
	u32 vf_resc_num;	/* resource_info.vf_size from the MFW */
	u32 vf_resc_start;	/* resource_info.vf_offset from the MFW */
	u32 flags;		/* resource_info.flags from the MFW */
};
3155
3156 #define ECORE_RECOVERY_PROLOG_SLEEP_MS  100
3157
3158 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
3159 {
3160         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3161         struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
3162         enum _ecore_status_t rc;
3163
3164         /* Allow ongoing PCIe transactions to complete */
3165         OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
3166
3167         /* Clear the PF's internal FID_enable in the PXP */
3168         rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
3169         if (rc != ECORE_SUCCESS)
3170                 DP_NOTICE(p_hwfn, false,
3171                           "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
3172                           rc);
3173
3174         return rc;
3175 }
3176
/* Send a resource-allocation mailbox command (GET or SET) to the MFW and
 * parse the reply into @p_out_params.
 *
 * The request and the response share one struct resource_info buffer:
 * mb_params.p_data_dst aliases p_data_src, so the MFW reply overwrites the
 * request in place and is unpacked below.
 */
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct ecore_resc_alloc_in_params *p_in_params,
			      struct ecore_resc_alloc_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));

	/* Translate the driver resource id to the MFW enumeration */
	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       ecore_hw_get_resc_name(p_in_params->res_id));
		return ECORE_INVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		/* Only SET carries a payload value */
		mfw_resc_info.size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return ECORE_INVAL;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	/* Response is written into the same buffer as the request */
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd, p_in_params->res_id,
		   ecore_hw_get_resc_name(p_in_params->res_id),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(mb_params.param,
				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Unpack the (now overwritten) resource_info reply */
	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   GET_MFW_FIELD(p_out_params->mcp_param,
				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num, p_out_params->resc_start,
		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
		   p_out_params->flags);

	return ECORE_SUCCESS;
}
3252
3253 enum _ecore_status_t
3254 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3255                            enum ecore_resources res_id, u32 resc_max_val,
3256                            u32 *p_mcp_resp)
3257 {
3258         struct ecore_resc_alloc_out_params out_params;
3259         struct ecore_resc_alloc_in_params in_params;
3260         enum _ecore_status_t rc;
3261
3262         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3263         in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3264         in_params.res_id = res_id;
3265         in_params.resc_max_val = resc_max_val;
3266         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3267         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3268                                            &out_params);
3269         if (rc != ECORE_SUCCESS)
3270                 return rc;
3271
3272         *p_mcp_resp = out_params.mcp_resp;
3273
3274         return ECORE_SUCCESS;
3275 }
3276
3277 enum _ecore_status_t
3278 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3279                         enum ecore_resources res_id, u32 *p_mcp_resp,
3280                         u32 *p_resc_num, u32 *p_resc_start)
3281 {
3282         struct ecore_resc_alloc_out_params out_params;
3283         struct ecore_resc_alloc_in_params in_params;
3284         enum _ecore_status_t rc;
3285
3286         OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3287         in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3288         in_params.res_id = res_id;
3289         OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3290         rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3291                                            &out_params);
3292         if (rc != ECORE_SUCCESS)
3293                 return rc;
3294
3295         *p_mcp_resp = out_params.mcp_resp;
3296
3297         if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3298                 *p_resc_num = out_params.resc_num;
3299                 *p_resc_start = out_params.resc_start;
3300         }
3301
3302         return ECORE_SUCCESS;
3303 }
3304
3305 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3306                                                struct ecore_ptt *p_ptt)
3307 {
3308         u32 mcp_resp, mcp_param;
3309
3310         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3311                              &mcp_resp, &mcp_param);
3312 }
3313
3314 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3315                                                    struct ecore_ptt *p_ptt,
3316                                                    u32 param, u32 *p_mcp_resp,
3317                                                    u32 *p_mcp_param)
3318 {
3319         enum _ecore_status_t rc;
3320
3321         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3322                            p_mcp_resp, p_mcp_param);
3323         if (rc != ECORE_SUCCESS)
3324                 return rc;
3325
3326         if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3327                 DP_INFO(p_hwfn,
3328                         "The resource command is unsupported by the MFW\n");
3329                 return ECORE_NOTIMPL;
3330         }
3331
3332         if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3333                 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3334
3335                 DP_NOTICE(p_hwfn, false,
3336                           "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3337                           param, opcode);
3338                 return ECORE_INVAL;
3339         }
3340
3341         return rc;
3342 }
3343
/* Single attempt to acquire an MFW resource lock.
 *
 * Side effect: the TO_DEFAULT/TO_NONE pseudo-timeouts overwrite
 * p_params->timeout with 0 before the request is sent.
 * On return, p_params->owner holds the owner reported by the MFW and
 * p_params->b_granted tells whether this function now holds the lock.
 */
enum _ecore_status_t
__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	/* Translate the requested timeout into a lock-request opcode */
	switch (p_params->timeout) {
	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case ECORE_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		/* Held by another owner; not an error at this level */
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
3404
3405 enum _ecore_status_t
3406 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3407                     struct ecore_resc_lock_params *p_params)
3408 {
3409         u32 retry_cnt = 0;
3410         enum _ecore_status_t rc;
3411
3412         do {
3413                 /* No need for an interval before the first iteration */
3414                 if (retry_cnt) {
3415                         if (p_params->sleep_b4_retry) {
3416                                 u16 retry_interval_in_ms =
3417                                         DIV_ROUND_UP(p_params->retry_interval,
3418                                                      1000);
3419
3420                                 OSAL_MSLEEP(retry_interval_in_ms);
3421                         } else {
3422                                 OSAL_UDELAY(p_params->retry_interval);
3423                         }
3424                 }
3425
3426                 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3427                 if (rc != ECORE_SUCCESS)
3428                         return rc;
3429
3430                 if (p_params->b_granted)
3431                         break;
3432         } while (retry_cnt++ < p_params->retry_num);
3433
3434         return ECORE_SUCCESS;
3435 }
3436
3437 void
3438 ecore_mcp_resc_lock_default_init(struct ecore_hwfn *p_hwfn,
3439                                  struct ecore_resc_lock_params *p_lock,
3440                                  struct ecore_resc_unlock_params *p_unlock,
3441                                  enum ecore_resc_lock resource,
3442                                  bool b_is_permanent)
3443 {
3444         if (p_lock != OSAL_NULL) {
3445                 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
3446
3447                 /* Permanent resources don't require aging, and there's no
3448                  * point in trying to acquire them more than once since it's
3449                  * unexpected another entity would release them.
3450                  */
3451                 if (b_is_permanent) {
3452                         p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
3453                 } else {
3454                         p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3455                         p_lock->retry_interval =
3456                                         ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3457                         p_lock->sleep_b4_retry = true;
3458                 }
3459
3460                 p_lock->resource = resource;
3461         }
3462
3463         if (p_unlock != OSAL_NULL) {
3464                 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
3465                 p_unlock->resource = resource;
3466         }
3467 }
3468
/* Release an MFW resource lock (forcibly if p_params->b_force is set).
 * p_params->b_released reports whether the resource is now free; releasing
 * an already-released resource is tolerated with an informational message.
 */
enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	enum _ecore_status_t rc;

	/* Force-release ignores ownership; plain release requires it */
	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
				    &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Analyze the response */
	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
3520
3521 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
3522 {
3523         return !!(p_hwfn->mcp_info->capabilities &
3524                   FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3525 }
3526
3527 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
3528                                                 struct ecore_ptt *p_ptt)
3529 {
3530         u32 mcp_resp;
3531         enum _ecore_status_t rc;
3532
3533         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3534                            0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3535         if (rc == ECORE_SUCCESS)
3536                 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
3537                            "MFW supported features: %08x\n",
3538                            p_hwfn->mcp_info->capabilities);
3539
3540         return rc;
3541 }
3542
3543 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
3544                                                 struct ecore_ptt *p_ptt)
3545 {
3546         u32 mcp_resp, mcp_param, features;
3547
3548         features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
3549                    DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
3550
3551         return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3552                              features, &mcp_resp, &mcp_param);
3553 }