net/qede/base: update FW to 8.40.25.0
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 89c9864..a5aa074 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1,14 +1,13 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bcm_osal.h"
 #include "ecore.h"
 #include "ecore_status.h"
+#include "nvm_cfg.h"
 #include "ecore_mcp.h"
 #include "mcp_public.h"
 #include "reg_addr.h"
 #include "ecore_sp_commands.h"
 #include "ecore_cxt.h"
 
-#define CHIP_MCP_RESP_ITER_US 10
-#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
+#define GRCBASE_MCP    0xe00000
 
+#define ECORE_MCP_RESP_ITER_US         10
 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)  /* Account for 5 sec */
 #define ECORE_MCP_RESET_RETRIES (50 * 1000)    /* Account for 500 msec */
 
+#ifndef ASIC_ONLY
+/* Non-ASIC:
+ * The waiting interval is multiplied by 100 to reduce the impact of the
+ * built-in delay of 100 usec in each ecore_rd().
+ * In addition, a factor of 4 compared to ASIC is applied.
+ */
+#define ECORE_EMUL_MCP_RESP_ITER_US    (ECORE_MCP_RESP_ITER_US * 100)
+#define ECORE_EMUL_DRV_MB_MAX_RETRIES  ((ECORE_DRV_MB_MAX_RETRIES / 100) * 4)
+#define ECORE_EMUL_MCP_RESET_RETRIES   ((ECORE_MCP_RESET_RETRIES / 100) * 4)
+#endif
+
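A quick sanity check on the resulting timeouts (illustrative macro names, not part of this patch): on ASIC the mailbox wait is 500,000 iterations of 10 usec, i.e. ~5 sec, while on emulation it becomes 20,000 iterations of 1,000 usec, i.e. ~20 sec.

/* Illustrative only -- not introduced by this patch. */
#define EXAMPLE_ASIC_MB_TIMEOUT_US \
        (ECORE_DRV_MB_MAX_RETRIES * ECORE_MCP_RESP_ITER_US)           /* ~5 sec */
#define EXAMPLE_EMUL_MB_TIMEOUT_US \
        (ECORE_EMUL_DRV_MB_MAX_RETRIES * ECORE_EMUL_MCP_RESP_ITER_US) /* ~20 sec */
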
 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
        ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
                 _val)
@@ -156,6 +166,9 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
        if (p_hwfn->mcp_info) {
                struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
 
+               OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
+               OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
+
                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
                OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
                                              &p_hwfn->mcp_info->cmd_list, list,
@@ -164,8 +177,6 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
                }
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
 
-               OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
-               OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
 #ifdef CONFIG_ECORE_LOCK_ALLOC
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
@@ -177,27 +188,63 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
        return ECORE_SUCCESS;
 }
 
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES        20
+#define ECORE_MCP_SHMEM_RDY_ITER_MS    50
+
 static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
 {
        struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
-       u32 drv_mb_offsize, mfw_mb_offsize;
+       u32 drv_mb_offsize, mfw_mb_offsize, val;
+       u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
+       u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 
+       val = ecore_rd(p_hwfn, p_ptt, MCP_REG_CACHE_PAGING_ENABLE);
+       p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+       if (!p_info->public_base) {
+               DP_NOTICE(p_hwfn, false,
+                         "The address of the MCP scratch-pad is not configured\n");
 #ifndef ASIC_ONLY
-       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
-               DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
-               p_info->public_base = 0;
-               return ECORE_INVAL;
-       }
+               /* Zeroed "public_base" implies no MFW */
+               if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+                       DP_INFO(p_hwfn, "Emulation: Assume no MFW\n");
 #endif
-
-       p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
-       if (!p_info->public_base)
                return ECORE_INVAL;
+       }
 
        p_info->public_base |= GRCBASE_MCP;
 
+       /* Get the MFW MB address and number of supported messages */
+       mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+                                 SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                 PUBLIC_MFW_MB));
+       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+       p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+                                             p_info->mfw_mb_addr);
+
+       /* @@@TBD:
+        * The driver can notify that there was an MCP reset, and read the SHMEM
+        * values before the MFW has completed initializing them.
+        * As a temporary solution, the "sup_msgs" field is used as a data ready
+        * indication.
+        * This should be replaced with an actual indication when it is provided
+        * by the MFW.
+        */
+       while (!p_info->mfw_mb_length && --cnt) {
+               OSAL_MSLEEP(msec);
+               p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+                                                     p_info->mfw_mb_addr);
+       }
+
+       if (!cnt) {
+               DP_NOTICE(p_hwfn, false,
+                         "Failed to get the SHMEM ready notification after %d msec\n",
+                         ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+               return ECORE_TIMEOUT;
+       }
+
        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -208,14 +255,6 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                   " mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 
-       /* Set the MFW MB address */
-       mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
-                                 SECTION_OFFSIZE_ADDR(p_info->public_base,
-                                                      PUBLIC_MFW_MB));
-       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
-       p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
-                                              p_info->mfw_mb_addr);
-
        /* Get the current driver mailbox sequence before sending
         * the first command
         */
@@ -239,14 +278,33 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
 
        /* Allocate mcp_info structure */
        p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
-                                      sizeof(*p_hwfn->mcp_info));
-       if (!p_hwfn->mcp_info)
-               goto err;
+                       sizeof(*p_hwfn->mcp_info));
+       if (!p_hwfn->mcp_info) {
+               DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
+               return ECORE_NOMEM;
+       }
        p_info = p_hwfn->mcp_info;
 
+       /* Initialize the MFW spinlocks */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+       if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
+               OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+               return ECORE_NOMEM;
+       }
+       if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
+               OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
+               OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+               return ECORE_NOMEM;
+       }
+#endif
+       OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
+       OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
+
+       OSAL_LIST_INIT(&p_info->cmd_list);
+
        if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
-               /* Do not free mcp_info here, since public_base indicate that
+               /* Do not free mcp_info here, since "public_base" indicates that
                 * the MCP is not initialized
                 */
                return ECORE_SUCCESS;
@@ -258,20 +316,10 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
        if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
                goto err;
 
-       /* Initialize the MFW spinlocks */
-#ifdef CONFIG_ECORE_LOCK_ALLOC
-       OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
-       OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
-#endif
-       OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
-       OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
-
-       OSAL_LIST_INIT(&p_info->cmd_list);
-
        return ECORE_SUCCESS;
 
 err:
-       DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
+       DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
        ecore_mcp_free(p_hwfn);
        return ECORE_NOMEM;
 }
@@ -297,14 +345,16 @@ static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
 {
-       u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+       u32 prev_generic_por_0, seq, delay = ECORE_MCP_RESP_ITER_US, cnt = 0;
+       u32 retries = ECORE_MCP_RESET_RETRIES;
        enum _ecore_status_t rc = ECORE_SUCCESS;
 
 #ifndef ASIC_ONLY
-       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
-               delay = EMUL_MCP_RESP_ITER_US;
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               delay = ECORE_EMUL_MCP_RESP_ITER_US;
+               retries = ECORE_EMUL_MCP_RESET_RETRIES;
+       }
 #endif
-
        if (p_hwfn->mcp_info->b_block_cmd) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
@@ -314,23 +364,24 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
        /* Ensure that only a single thread is accessing the mailbox */
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
 
-       org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+       prev_generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 
        /* Set drv command along with the updated sequence */
        ecore_mcp_reread_offsets(p_hwfn, p_ptt);
        seq = ++p_hwfn->mcp_info->drv_mb_seq;
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
 
+       /* Give the MFW up to 500 msec (50 * 1000 * 10 usec) to resume */
        do {
-               /* Wait for MFW response */
                OSAL_UDELAY(delay);
-               /* Give the FW up to 500 second (50*1000*10usec) */
-       } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
-                                               MISCS_REG_GENERIC_POR_0)) &&
-                (cnt++ < ECORE_MCP_RESET_RETRIES));
 
-       if (org_mcp_reset_seq !=
-           ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+               if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
+                   prev_generic_por_0)
+                       break;
+       } while (cnt++ < retries);
+
+       if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
+           prev_generic_por_0) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
@@ -343,6 +394,71 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
        return rc;
 }
 
+#ifndef ASIC_ONLY
+static void ecore_emul_mcp_load_req(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_mcp_mb_params *p_mb_params)
+{
+       if (GET_MFW_FIELD(p_mb_params->param, DRV_ID_MCP_HSI_VER) !=
+           1 /* ECORE_LOAD_REQ_HSI_VER_1 */) {
+               p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1;
+               return;
+       }
+
+       if (!loaded)
+               p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
+       else if (!loaded_port[p_hwfn->port_id])
+               p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_PORT;
+       else
+               p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+
+       /* On CMT, always tell that it's engine */
+       if (ECORE_IS_CMT(p_hwfn->p_dev))
+               p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+       loaded++;
+       loaded_port[p_hwfn->port_id]++;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "Load phase: 0x%08x load cnt: 0x%x port id=%d port_load=%d\n",
+                  p_mb_params->mcp_resp, loaded, p_hwfn->port_id,
+                  loaded_port[p_hwfn->port_id]);
+}
+
+static void ecore_emul_mcp_unload_req(struct ecore_hwfn *p_hwfn)
+{
+       loaded--;
+       loaded_port[p_hwfn->port_id]--;
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n", loaded);
+}
+
+static enum _ecore_status_t
+ecore_emul_mcp_cmd(struct ecore_hwfn *p_hwfn,
+                  struct ecore_mcp_mb_params *p_mb_params)
+{
+       if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               return ECORE_INVAL;
+
+       switch (p_mb_params->cmd) {
+       case DRV_MSG_CODE_LOAD_REQ:
+               ecore_emul_mcp_load_req(p_hwfn, p_mb_params);
+               break;
+       case DRV_MSG_CODE_UNLOAD_REQ:
+               ecore_emul_mcp_unload_req(p_hwfn);
+               break;
+       case DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT:
+       case DRV_MSG_CODE_RESOURCE_CMD:
+       case DRV_MSG_CODE_MDUMP_CMD:
+       case DRV_MSG_CODE_GET_ENGINE_CONFIG:
+       case DRV_MSG_CODE_GET_PPFID_BITMAP:
+               return ECORE_NOTIMPL;
+       default:
+               break;
+       }
+
+       return ECORE_SUCCESS;
+}
+#endif
+
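For context, a minimal sketch of how a caller could act on the load phase that ecore_emul_mcp_load_req() returns in p_mb_params->mcp_resp (the real, non-emulated path gets the same FW_MSG_CODE_DRV_LOAD_* values from ecore_mcp_load_req()); the function name and comments below are illustrative, not part of this patch:

/* Hypothetical caller sketch -- not part of this patch. */
static void example_handle_load_code(struct ecore_hwfn *p_hwfn, u32 load_code)
{
        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_ENGINE:
                /* First function on the engine: run engine-level init */
                break;
        case FW_MSG_CODE_DRV_LOAD_PORT:
                /* First function on this port: run port-level init */
                break;
        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                /* Engine and port already initialized: function-level only */
                break;
        default:
                DP_NOTICE(p_hwfn, false, "Unexpected load code 0x%08x\n",
                          load_code);
                break;
        }
}
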
 /* Must be called while cmd_lock is acquired */
 static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
 {
@@ -433,7 +549,7 @@ static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
        /* Set the drv command along with the sequence number */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
 
-       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "MFW mailbox: command 0x%08x param 0x%08x\n",
                   (p_mb_params->cmd | seq_num), p_mb_params->param);
 }
@@ -447,6 +563,29 @@ static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
                block_cmd ? "Block" : "Unblock");
 }
 
+void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt)
+{
+       u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+       u32 delay = ECORE_MCP_RESP_ITER_US;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               delay = ECORE_EMUL_MCP_RESP_ITER_US;
+#endif
+       cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+       cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+       cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+       OSAL_UDELAY(delay);
+       cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+       OSAL_UDELAY(delay);
+       cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+       DP_NOTICE(p_hwfn, false,
+                 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+                 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
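One way to read the dump above, offered as an assumption rather than a documented rule: the three program-counter samples are taken roughly one polling interval apart, so three identical values suggest the MCP CPU is halted or stuck rather than merely slow to answer the mailbox. A tiny illustrative helper (not part of this patch):

/* Illustrative only -- not part of this patch. */
static bool example_mcp_cpu_looks_stuck(u32 cpu_pc_0, u32 cpu_pc_1, u32 cpu_pc_2)
{
        return cpu_pc_0 == cpu_pc_1 && cpu_pc_1 == cpu_pc_2;
}
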
 static enum _ecore_status_t
 _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         struct ecore_mcp_mb_params *p_mb_params,
@@ -477,6 +616,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
                OSAL_UDELAY(delay);
+               OSAL_MFW_CMD_PREEMPT(p_hwfn);
        } while (++cnt < max_retries);
 
        if (cnt >= max_retries) {
@@ -518,12 +658,14 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                        goto err;
 
                OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+               OSAL_MFW_CMD_PREEMPT(p_hwfn);
        } while (++cnt < max_retries);
 
        if (cnt >= max_retries) {
                DP_NOTICE(p_hwfn, false,
                          "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
+               ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
 
                OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
                ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
@@ -537,7 +679,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
        ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
 
-       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
                   p_mb_params->mcp_resp, p_mb_params->mcp_param,
                   (cnt * delay) / 1000, (cnt * delay) % 1000);
@@ -559,19 +701,25 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
 {
        osal_size_t union_data_size = sizeof(union drv_union_data);
        u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
-       u32 delay = CHIP_MCP_RESP_ITER_US;
+       u32 usecs = ECORE_MCP_RESP_ITER_US;
 
 #ifndef ASIC_ONLY
-       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
-               delay = EMUL_MCP_RESP_ITER_US;
-       /* There is a built-in delay of 100usec in each MFW response read */
-       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
-               max_retries /= 10;
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn))
+               return ecore_emul_mcp_cmd(p_hwfn, p_mb_params);
+
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               max_retries = ECORE_EMUL_DRV_MB_MAX_RETRIES;
+               usecs = ECORE_EMUL_MCP_RESP_ITER_US;
+       }
 #endif
+       if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+               max_retries = DIV_ROUND_UP(max_retries, 1000);
+               usecs *= 1000;
+       }
 
        /* MCP not initialized */
        if (!ecore_mcp_is_init(p_hwfn)) {
-               DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+               DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
                return ECORE_BUSY;
        }
 
@@ -592,7 +740,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
        }
 
        return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
-                                       delay);
+                                       usecs);
 }
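
The CAN_SLEEP flag keeps the overall ~5 sec timeout roughly unchanged while trading 500,000 busy-wait polls of 10 usec for 500 sleeps of 10 msec. A hedged usage sketch, assuming the flag is exposed to callers as ECORE_MB_FLAG_CAN_SLEEP through the mb_params flags field (as the ECORE_MB_FLAGS_IS_SET() check above suggests):

/* Hypothetical caller sketch -- not part of this patch. */
static enum _ecore_status_t
example_sleepable_mb_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_mb_params mb_params;

        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT;
        mb_params.flags = ECORE_MB_FLAG_CAN_SLEEP;      /* assumed flag name */

        return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}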
 
 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
@@ -602,18 +750,6 @@ enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;
 
-#ifndef ASIC_ONLY
-       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
-               if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
-                       loaded--;
-                       loaded_port[p_hwfn->port_id]--;
-                       DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
-                                  loaded);
-               }
-               return ECORE_SUCCESS;
-       }
-#endif
-
        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
@@ -687,34 +823,6 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
-#ifndef ASIC_ONLY
-static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
-                                   u32 *p_load_code)
-{
-       static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
-
-       if (!loaded)
-               load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
-       else if (!loaded_port[p_hwfn->port_id])
-               load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
-       else
-               load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
-
-       /* On CMT, always tell that it's engine */
-       if (ECORE_IS_CMT(p_hwfn->p_dev))
-               load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
-
-       *p_load_code = load_phase;
-       loaded++;
-       loaded_port[p_hwfn->port_id]++;
-
-       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
-                  "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
-                  *p_load_code, loaded, p_hwfn->port_id,
-                  loaded_port[p_hwfn->port_id]);
-}
-#endif
-
 static bool
 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
                         enum ecore_override_force_load override_force_load)
@@ -946,13 +1054,6 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
        u8 mfw_drv_role = 0, mfw_force_cmd;
        enum _ecore_status_t rc;
 
-#ifndef ASIC_ONLY
-       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
-               ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
-               return ECORE_SUCCESS;
-       }
-#endif
-
        OSAL_MEM_ZERO(&in_params, sizeof(in_params));
        in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
        in_params.drv_ver_0 = ECORE_VERSION;
@@ -1068,8 +1169,6 @@ enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
                return rc;
        }
 
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR     (1 << 0)
-
        /* Check if there is a DID mismatch between nvm-cfg/efuse */
        if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
                DP_NOTICE(p_hwfn, false,
@@ -1110,15 +1209,17 @@ static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
        u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        u32 path_addr = SECTION_ADDR(mfw_path_offsize,
                                     ECORE_PATH_ID(p_hwfn));
-       u32 disabled_vfs[VF_MAX_STATIC / 32];
+       u32 disabled_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
        int i;
 
+       OSAL_MEM_ZERO(disabled_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
+
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Reading Disabled VF information from [offset %08x],"
                   " path_addr %08x\n",
                   mfw_path_offsize, path_addr);
 
-       for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+       for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) {
                disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
                                           path_addr +
                                           OFFSETOF(struct public_path,
@@ -1137,16 +1238,11 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 *vfs_to_ack)
 {
-       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-                                       PUBLIC_FUNC);
-       u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
-       u32 func_addr = SECTION_ADDR(mfw_func_offsize,
-                                    MCP_PF_ID(p_hwfn));
        struct ecore_mcp_mb_params mb_params;
        enum _ecore_status_t rc;
-       int i;
+       u16 i;
 
-       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+       for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
                DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
                           "Acking VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
@@ -1154,7 +1250,7 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
        OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
        mb_params.p_data_src = vfs_to_ack;
-       mb_params.data_src_size = VF_MAX_STATIC / 8;
+       mb_params.data_src_size = (u8)VF_BITMAP_SIZE_IN_BYTES;
        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
                                     &mb_params);
        if (rc != ECORE_SUCCESS) {
@@ -1163,13 +1259,6 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
                return ECORE_TIMEOUT;
        }
 
-       /* TMP - clear the ACK bits; should be done by MFW */
-       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
-               ecore_wr(p_hwfn, p_ptt,
-                        func_addr +
-                        OFFSETOF(struct public_func, drv_ack_vf_disabled) +
-                        i * sizeof(u32), 0);
-
        return rc;
 }
 
@@ -1197,6 +1286,8 @@ static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
                DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
        else
                DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+
+       OSAL_TRANSCEIVER_UPDATE(p_hwfn);
 }
 
 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
@@ -1222,6 +1313,28 @@ static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
                p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
 }
 
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   struct public_func *p_data,
+                                   int pfid)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+       u32 i, size;
+
+       OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+       size = OSAL_MIN_T(u32, sizeof(*p_data),
+                         SECTION_SIZE(mfw_path_offsize));
+       for (i = 0; i < size / sizeof(u32); i++)
+               ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+                                             func_addr + (i << 2));
+
+       return size;
+}
+
 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         bool b_reset)
@@ -1251,10 +1364,24 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
                goto out;
        }
 
-       if (p_hwfn->b_drv_link_init)
-               p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
-       else
+       if (p_hwfn->b_drv_link_init) {
+               /* Link indication with modern MFW arrives as per-PF
+                * indication.
+                */
+               if (p_hwfn->mcp_info->capabilities &
+                   FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+                       struct public_func shmem_info;
+
+                       ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                                                MCP_PF_ID(p_hwfn));
+                       p_link->link_up = !!(shmem_info.status &
+                                            FUNC_STATUS_VIRTUAL_LINK_UP);
+               } else {
+                       p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+               }
+       } else {
                p_link->link_up = false;
+       }
 
        p_link->full_duplex = true;
        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1301,7 +1428,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
        __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
                                           p_link, max_bw);
 
-       /* Mintz bandwidth configuration */
+       /* Min bandwidth configuration */
        __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                           p_link, min_bw);
        ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
@@ -1362,7 +1489,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
        if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
                ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
 
-       OSAL_LINK_UPDATE(p_hwfn, p_ptt);
+       OSAL_LINK_UPDATE(p_hwfn);
 out:
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
 }
@@ -1377,8 +1504,11 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
        u32 cmd;
 
 #ifndef ASIC_ONLY
-       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               if (b_up)
+                       OSAL_LINK_UPDATE(p_hwfn);
                return ECORE_SUCCESS;
+       }
 #endif
 
        /* Set the shmem configuration according to params */
@@ -1517,7 +1647,8 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
                hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
                break;
        default:
-               DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "Invalid protocol type %d\n", type);
                return;
        }
 
@@ -1567,28 +1698,6 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
        }
 }
 
-static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
-                                   struct ecore_ptt *p_ptt,
-                                   struct public_func *p_data,
-                                   int pfid)
-{
-       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-                                       PUBLIC_FUNC);
-       u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
-       u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
-       u32 i, size;
-
-       OSAL_MEM_ZERO(p_data, sizeof(*p_data));
-
-       size = OSAL_MIN_T(u32, sizeof(*p_data),
-                         SECTION_SIZE(mfw_path_offsize));
-       for (i = 0; i < size / sizeof(u32); i++)
-               ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
-                                             func_addr + (i << 2));
-
-       return size;
-}
-
 static void
 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 {
@@ -1611,6 +1720,49 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
                      &param);
 }
 
+static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_ptt *p_ptt)
+{
+       struct public_func shmem_info;
+       u32 resp = 0, param = 0;
+
+       ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                                MCP_PF_ID(p_hwfn));
+
+       p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+                                                FUNC_MF_CFG_OV_STAG_MASK;
+       p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
+       if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
+               if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
+                       ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+                                p_hwfn->hw_info.ovlan);
+                       ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+                       /* Configure DB to add external vlan to EDPM packets */
+                       ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+                       ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID,
+                                p_hwfn->hw_info.ovlan);
+               } else {
+                       ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+                       ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+
+                       /* Configure DB to add external vlan to EDPM packets */
+                       ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+                       ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID, 0);
+               }
+
+               ecore_sp_pf_update_stag(p_hwfn);
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan  = %d hw_mode = 0x%x\n",
+                  p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+       OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
+
+       /* Acknowledge the MFW */
+       ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+                     &resp, &param);
+}
+
 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
 {
        /* A single notification should be sent to upper driver in CMT mode */
@@ -1737,6 +1889,13 @@ ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
        struct mdump_config_stc mdump_config;
        enum _ecore_status_t rc;
 
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+               DP_INFO(p_hwfn, "Emulation: Can't get mdump info\n");
+               return ECORE_NOTIMPL;
+       }
+#endif
+
        OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
 
        addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
@@ -1901,7 +2060,7 @@ ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
                DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
                          val);
 
-       DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
                   p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
                   p_hwfn->ufp_info.pri_type);
@@ -1926,6 +2085,9 @@ ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
        /* update storm FW with negotiation results */
        ecore_sp_pf_update_ufp(p_hwfn);
 
+       /* update stag pcp value */
+       ecore_sp_pf_update_stag(p_hwfn);
+
        return ECORE_SUCCESS;
 }
 
@@ -1971,6 +2133,12 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
                case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
                        ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                    ECORE_DCBX_OPERATIONAL_MIB);
+                       /* clear the user-config cache */
+                       OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
+                                   sizeof(struct ecore_dcbx_set));
+                       break;
+               case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
+                       ecore_lldp_mib_update_event(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_OEM_CFG_UPDATE:
                        ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
@@ -1990,6 +2158,9 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
                case MFW_DRV_MSG_BW_UPDATE:
                        ecore_mcp_update_bw(p_hwfn, p_ptt);
                        break;
+               case MFW_DRV_MSG_S_TAG_UPDATE:
+                       ecore_mcp_update_stag(p_hwfn, p_ptt);
+                       break;
                case MFW_DRV_MSG_FAILURE_DETECTED:
                        ecore_mcp_handle_fan_failure(p_hwfn);
                        break;
@@ -2034,9 +2205,9 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
        u32 global_offsize;
 
 #ifndef ASIC_ONLY
-       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
-               DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
-               return ECORE_SUCCESS;
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+               DP_INFO(p_hwfn, "Emulation: Can't get MFW version\n");
+               return ECORE_NOTIMPL;
        }
 #endif
 
@@ -2078,29 +2249,231 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt,
                                              u32 *p_media_type)
 {
+       *p_media_type = MEDIA_UNSPECIFIED;
 
        /* TODO - Add support for VFs */
        if (IS_VF(p_hwfn->p_dev))
                return ECORE_INVAL;
 
        if (!ecore_mcp_is_init(p_hwfn)) {
-               DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+#ifndef ASIC_ONLY
+               if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+                       DP_INFO(p_hwfn, "Emulation: Can't get media type\n");
+                       return ECORE_NOTIMPL;
+               }
+#endif
+               DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
                return ECORE_BUSY;
        }
 
-       if (!p_ptt) {
-               *p_media_type = MEDIA_UNSPECIFIED;
+       if (!p_ptt)
+               return ECORE_INVAL;
+
+       *p_media_type = ecore_rd(p_hwfn, p_ptt,
+                                p_hwfn->mcp_info->port_addr +
+                                OFFSETOF(struct public_port, media_type));
+
+       return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt,
+                                                   u32 *p_transceiver_state,
+                                                   u32 *p_transceiver_type)
+{
+       u32 transceiver_info;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* TODO - Add support for VFs */
+       if (IS_VF(p_hwfn->p_dev))
                return ECORE_INVAL;
+
+       if (!ecore_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+               return ECORE_BUSY;
+       }
+
+       *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+       *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
+
+       transceiver_info = ecore_rd(p_hwfn, p_ptt,
+                                   p_hwfn->mcp_info->port_addr +
+                                   offsetof(struct public_port,
+                                   transceiver_data));
+
+       *p_transceiver_state = GET_MFW_FIELD(transceiver_info,
+                                            ETH_TRANSCEIVER_STATE);
+
+       if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) {
+               *p_transceiver_type = GET_MFW_FIELD(transceiver_info,
+                                           ETH_TRANSCEIVER_TYPE);
        } else {
-               *p_media_type = ecore_rd(p_hwfn, p_ptt,
-                                        p_hwfn->mcp_info->port_addr +
-                                        OFFSETOF(struct public_port,
-                                                 media_type));
+               *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
+       }
+
+       return rc;
+}
+
+static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
+{
+       if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+           ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+           (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+               return 1;
+
+       return 0;
+}
+
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_ptt *p_ptt,
+                                               u32 *p_speed_mask)
+{
+       u32 transceiver_type, transceiver_state;
+
+       ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+                                      &transceiver_type);
+
+
+       if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
+               return ECORE_INVAL;
+
+       switch (transceiver_type) {
+       case ETH_TRANSCEIVER_TYPE_1G_LX:
+       case ETH_TRANSCEIVER_TYPE_1G_SX:
+       case ETH_TRANSCEIVER_TYPE_1G_PCC:
+       case ETH_TRANSCEIVER_TYPE_1G_ACC:
+       case ETH_TRANSCEIVER_TYPE_1000BASET:
+               *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_10G_SR:
+       case ETH_TRANSCEIVER_TYPE_10G_LR:
+       case ETH_TRANSCEIVER_TYPE_10G_LRM:
+       case ETH_TRANSCEIVER_TYPE_10G_ER:
+       case ETH_TRANSCEIVER_TYPE_10G_PCC:
+       case ETH_TRANSCEIVER_TYPE_10G_ACC:
+       case ETH_TRANSCEIVER_TYPE_4x10G:
+               *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_40G_LR4:
+       case ETH_TRANSCEIVER_TYPE_40G_SR4:
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+               *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+                NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_100G_AOC:
+       case ETH_TRANSCEIVER_TYPE_100G_SR4:
+       case ETH_TRANSCEIVER_TYPE_100G_LR4:
+       case ETH_TRANSCEIVER_TYPE_100G_ER4:
+       case ETH_TRANSCEIVER_TYPE_100G_ACC:
+               *p_speed_mask =
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_25G_SR:
+       case ETH_TRANSCEIVER_TYPE_25G_LR:
+       case ETH_TRANSCEIVER_TYPE_25G_AOC:
+       case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+       case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+       case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+               *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+       case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+       case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+       case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+               *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_40G_CR4:
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+               *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_100G_CR4:
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+               *p_speed_mask =
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+               *p_speed_mask =
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_XLPPI:
+               *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+               break;
+
+       case ETH_TRANSCEIVER_TYPE_10G_BASET:
+               *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+               break;
+
+       default:
+               DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+                       transceiver_type);
+               *p_speed_mask = 0xff;
+               break;
        }
 
        return ECORE_SUCCESS;
 }
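
A hedged usage sketch of the helper above: narrowing a port's NVM-advertised speed capabilities to what the plugged transceiver supports. The function name and the "nvm_speed_capability" parameter are placeholders, not part of this patch:

/* Hypothetical usage sketch -- not part of this patch. */
static u32 example_effective_speed_mask(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        u32 nvm_speed_capability)
{
        u32 xcvr_mask = 0xff;

        /* On failure (e.g. transceiver absent or still updating), keep the
         * NVM-configured capabilities unchanged.
         */
        if (ecore_mcp_trans_speed_mask(p_hwfn, p_ptt, &xcvr_mask) !=
            ECORE_SUCCESS)
                return nvm_speed_capability;

        return nvm_speed_capability & xcvr_mask;
}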
 
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_ptt *p_ptt,
+                                               u32 *p_board_config)
+{
+       u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* TODO - Add support for VFs */
+       if (IS_VF(p_hwfn->p_dev))
+               return ECORE_INVAL;
+
+       if (!ecore_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+               return ECORE_BUSY;
+       }
+       if (!p_ptt) {
+               *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+               rc = ECORE_INVAL;
+       } else {
+               nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
+                                       MISC_REG_GEN_PURP_CR0);
+               nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
+                                          nvm_cfg_addr + 4);
+               port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+                       offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+               *p_board_config = ecore_rd(p_hwfn, p_ptt,
+                                            port_cfg_addr +
+                                            offsetof(struct nvm_cfg1_port,
+                                            board_cfg));
+       }
+
+       return rc;
+}
+
 /* @DPDK */
 /* Old MFW has a global configuration for all PFs regarding RDMA support */
 static void
@@ -2302,9 +2675,9 @@ enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
        u32 flash_size;
 
 #ifndef ASIC_ONLY
-       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
-               DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
-               return ECORE_INVAL;
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+               DP_INFO(p_hwfn, "Emulation: Can't get flash size\n");
+               return ECORE_NOTIMPL;
        }
 #endif
 
@@ -2401,6 +2774,16 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt,
                                              u8 vf_id, u8 num)
 {
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+               DP_INFO(p_hwfn,
+                       "Emulation: Avoid sending the %s mailbox command\n",
+                       ECORE_IS_BB(p_hwfn->p_dev) ? "CFG_VF_MSIX" :
+                                                    "CFG_PF_VFS_MSIX");
+               return ECORE_SUCCESS;
+       }
+#endif
+
        if (ECORE_IS_BB(p_hwfn->p_dev))
                return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
        else
@@ -2513,9 +2896,9 @@ ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   enum ecore_ov_client client)
 {
-       enum _ecore_status_t rc;
        u32 resp = 0, param = 0;
        u32 drv_mb_param;
+       enum _ecore_status_t rc;
 
        switch (client) {
        case ECORE_OV_CLIENT_DRV:
@@ -2545,9 +2928,9 @@ ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
                                 struct ecore_ptt *p_ptt,
                                 enum ecore_ov_driver_state drv_state)
 {
-       enum _ecore_status_t rc;
        u32 resp = 0, param = 0;
        u32 drv_mb_param;
+       enum _ecore_status_t rc;
 
        switch (drv_state) {
        case ECORE_OV_DRIVER_STATE_NOT_LOADED:
@@ -2580,10 +2963,72 @@ ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 }
 
 enum _ecore_status_t
-ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
-                       struct ecore_ptt *p_ptt, u16 mtu)
+ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                       u16 mtu)
 {
-       return 0;
+       u32 resp = 0, param = 0, drv_mb_param = 0;
+       enum _ecore_status_t rc;
+
+       SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu);
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
+                          drv_mb_param, &resp, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
+
+       return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                       u8 *mac)
+{
+       struct ecore_mcp_mb_params mb_params;
+       union drv_union_data union_data;
+       enum _ecore_status_t rc;
+
+       OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
+       SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE,
+                     DRV_MSG_CODE_VMAC_TYPE_MAC);
+       mb_params.param |= MCP_PF_ID(p_hwfn);
+       OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN);
+       mb_params.p_data_src = &union_data;
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
+
+       return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                           enum ecore_ov_eswitch eswitch)
+{
+       enum _ecore_status_t rc;
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+
+       switch (eswitch) {
+       case ECORE_OV_ESWITCH_NONE:
+               drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
+               break;
+       case ECORE_OV_ESWITCH_VEB:
+               drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
+               break;
+       case ECORE_OV_ESWITCH_VEPA:
+               drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
+               break;
+       default:
+               DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
+               return ECORE_INVAL;
+       }
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
+                          drv_mb_param, &resp, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
+
+       return rc;
 }
 
 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
@@ -2695,11 +3140,11 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
 }
 
 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
-                                       u32 addr, u8 *p_buf, u32 len)
+                                       u32 addr, u8 *p_buf, u32 *p_len)
 {
        struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
        struct ecore_ptt *p_ptt;
-       u32 resp, param;
+       u32 resp = 0, param;
        enum _ecore_status_t rc;
 
        p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -2710,7 +3155,7 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
                                  (cmd == ECORE_PHY_CORE_READ) ?
                                  DRV_MSG_CODE_PHY_CORE_READ :
                                  DRV_MSG_CODE_PHY_RAW_READ,
-                                 addr, &resp, &param, &len, (u32 *)p_buf);
+                                 addr, &resp, &param, p_len, (u32 *)p_buf);
        if (rc != ECORE_SUCCESS)
                DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
 
@@ -2739,7 +3184,7 @@ enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
 {
        struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
        struct ecore_ptt *p_ptt;
-       u32 resp, param;
+       u32 resp = 0, param;
        enum _ecore_status_t rc;
 
        p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -2758,7 +3203,7 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
 {
        struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
        struct ecore_ptt *p_ptt;
-       u32 resp, param;
+       u32 resp = 0, param;
        enum _ecore_status_t rc;
 
        p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -2852,8 +3297,8 @@ enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
                                         u32 addr, u8 *p_buf, u32 len)
 {
        struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       u32 resp = 0, param, nvm_cmd;
        struct ecore_ptt *p_ptt;
-       u32 resp, param, nvm_cmd;
        enum _ecore_status_t rc;
 
        p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -2919,11 +3364,16 @@ enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
                                          DRV_MSG_CODE_TRANSCEIVER_READ,
                                          nvm_offset, &resp, &param, &buf_size,
                                          (u32 *)(p_buf + offset));
-               if ((resp & FW_MSG_CODE_MASK) ==
-                   FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+               if (rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, false,
+                                 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+                                 rc);
+                       return rc;
+               }
+
+               if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
                        return ECORE_NODEV;
-               } else if ((resp & FW_MSG_CODE_MASK) !=
-                          FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+               else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
                        return ECORE_UNKNOWN_ERROR;
 
                offset += buf_size;
@@ -2957,11 +3407,16 @@ enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
                                          DRV_MSG_CODE_TRANSCEIVER_WRITE,
                                          nvm_offset, &resp, &param, buf_size,
                                          (u32 *)&p_buf[buf_idx]);
-               if ((resp & FW_MSG_CODE_MASK) ==
-                   FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+               if (rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, false,
+                                 "Failed to send a transceiver write command to the MFW. rc = %d.\n",
+                                 rc);
+                       return rc;
+               }
+
+               if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
                        return ECORE_NODEV;
-               } else if ((resp & FW_MSG_CODE_MASK) !=
-                          FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+               else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
                        return ECORE_UNKNOWN_ERROR;
 
                buf_idx += buf_size;
@@ -3669,7 +4124,8 @@ enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
        u32 mcp_resp, mcp_param, features;
 
        features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
-                  DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
+                  DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+                  DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
 
        return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
                             features, &mcp_resp, &mcp_param);
@@ -3747,3 +4203,99 @@ ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 
        return ECORE_SUCCESS;
 }
+
+enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       struct ecore_mcp_mb_params mb_params;
+       u8 fir_valid, l2_valid;
+       enum _ecore_status_t rc;
+
+       OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+               DP_INFO(p_hwfn,
+                       "The get_engine_config command is unsupported by the MFW\n");
+               return ECORE_NOTIMPL;
+       }
+
+       fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
+                                 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
+       if (fir_valid)
+               p_dev->fir_affin =
+                       GET_MFW_FIELD(mb_params.mcp_param,
+                                     FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
+
+       l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
+                                FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
+       if (l2_valid)
+               p_dev->l2_affin_hint =
+                       GET_MFW_FIELD(mb_params.mcp_param,
+                                     FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
+
+       DP_INFO(p_hwfn,
+               "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
+               fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);
+
+       return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_ptt *p_ptt)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       struct ecore_mcp_mb_params mb_params;
+       enum _ecore_status_t rc;
+
+       OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+               DP_INFO(p_hwfn,
+                       "The get_ppfid_bitmap command is unsupported by the MFW\n");
+               return ECORE_NOTIMPL;
+       }
+
+       p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
+                                           FW_MB_PARAM_PPFID_BITMAP);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
+                  p_dev->ppfid_bitmap);
+
+       return ECORE_SUCCESS;
+}
+
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                     u32 offset, u32 val)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 dword = val;
+       struct ecore_mcp_mb_params mb_params;
+
+       OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
+       mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
+       mb_params.param = offset;
+       mb_params.p_data_src = &dword;
+       mb_params.data_src_size = sizeof(dword);
+
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, false,
+                         "Failed to send WoL write request, rc = %d\n", rc);
+       }
+
+       if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
+               DP_NOTICE(p_hwfn, false,
+                         "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
+                         val, offset, mb_params.mcp_resp);
+               rc = ECORE_UNKNOWN_ERROR;
+       }
+}
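
Finally, a hedged usage sketch of ecore_mcp_wol_wr(): asking the MFW to write a WoL-related register on the driver's behalf. The offset and value below are placeholders, not real register definitions from this patch:

/* Hypothetical usage sketch -- not part of this patch. */
static void example_program_wol_reg(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt)
{
        u32 wol_reg_offset = 0x0;       /* placeholder offset */
        u32 wol_reg_value = 0x1;        /* placeholder value */

        /* Errors are reported via DP_NOTICE inside ecore_mcp_wol_wr() */
        ecore_mcp_wol_wr(p_hwfn, p_ptt, wol_reg_offset, wol_reg_value);
}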