qede: add base driver
authorRasesh Mody <rasesh.mody@qlogic.com>
Wed, 27 Apr 2016 14:18:36 +0000 (07:18 -0700)
committerBruce Richardson <bruce.richardson@intel.com>
Fri, 6 May 2016 13:51:22 +0000 (15:51 +0200)
The base driver is the backend module for the QLogic FastLinQ QL4xxxx
25G/40G CNA family of adapters as well as their virtual functions (VF)
in SR-IOV context.

The purpose of the base module is to:
 - provide all the common code that will be shared between the various
   drivers that would be used with said line of products. Flows such as
   chip initialization and de-initialization fall under this category.
 - abstract the protocol-specific HW & FW components, allowing the
   protocol drivers to have clean APIs whose slowpath configuration is
   detached from the actual Hardware Software Interface (HSI).

This patch adds a base module without any protocol-specific bits.
I.e., this adds a basic implementation that almost entirely falls under
the first category.

Signed-off-by: Harish Patil <harish.patil@qlogic.com>
Signed-off-by: Rasesh Mody <rasesh.mody@qlogic.com>
Signed-off-by: Sony Chacko <sony.chacko@qlogic.com>
46 files changed:
MAINTAINERS
drivers/net/qede/LICENSE.qede_pmd [new file with mode: 0644]
drivers/net/qede/Makefile [new file with mode: 0644]
drivers/net/qede/base/bcm_osal.c [new file with mode: 0644]
drivers/net/qede/base/bcm_osal.h [new file with mode: 0644]
drivers/net/qede/base/common_hsi.h [new file with mode: 0644]
drivers/net/qede/base/ecore.h [new file with mode: 0644]
drivers/net/qede/base/ecore_chain.h [new file with mode: 0644]
drivers/net/qede/base/ecore_cxt.c [new file with mode: 0644]
drivers/net/qede/base/ecore_cxt.h [new file with mode: 0644]
drivers/net/qede/base/ecore_cxt_api.h [new file with mode: 0644]
drivers/net/qede/base/ecore_dev.c [new file with mode: 0644]
drivers/net/qede/base/ecore_dev_api.h [new file with mode: 0644]
drivers/net/qede/base/ecore_gtt_reg_addr.h [new file with mode: 0644]
drivers/net/qede/base/ecore_gtt_values.h [new file with mode: 0644]
drivers/net/qede/base/ecore_hsi_common.h [new file with mode: 0644]
drivers/net/qede/base/ecore_hsi_eth.h [new file with mode: 0644]
drivers/net/qede/base/ecore_hsi_tools.h [new file with mode: 0644]
drivers/net/qede/base/ecore_hw.c [new file with mode: 0644]
drivers/net/qede/base/ecore_hw.h [new file with mode: 0644]
drivers/net/qede/base/ecore_hw_defs.h [new file with mode: 0644]
drivers/net/qede/base/ecore_init_fw_funcs.c [new file with mode: 0644]
drivers/net/qede/base/ecore_init_fw_funcs.h [new file with mode: 0644]
drivers/net/qede/base/ecore_init_ops.c [new file with mode: 0644]
drivers/net/qede/base/ecore_init_ops.h [new file with mode: 0644]
drivers/net/qede/base/ecore_int.c [new file with mode: 0644]
drivers/net/qede/base/ecore_int.h [new file with mode: 0644]
drivers/net/qede/base/ecore_int_api.h [new file with mode: 0644]
drivers/net/qede/base/ecore_iro.h [new file with mode: 0644]
drivers/net/qede/base/ecore_iro_values.h [new file with mode: 0644]
drivers/net/qede/base/ecore_mcp.c [new file with mode: 0644]
drivers/net/qede/base/ecore_mcp.h [new file with mode: 0644]
drivers/net/qede/base/ecore_mcp_api.h [new file with mode: 0644]
drivers/net/qede/base/ecore_proto_if.h [new file with mode: 0644]
drivers/net/qede/base/ecore_rt_defs.h [new file with mode: 0644]
drivers/net/qede/base/ecore_sp_api.h [new file with mode: 0644]
drivers/net/qede/base/ecore_sp_commands.c [new file with mode: 0644]
drivers/net/qede/base/ecore_sp_commands.h [new file with mode: 0644]
drivers/net/qede/base/ecore_spq.c [new file with mode: 0644]
drivers/net/qede/base/ecore_spq.h [new file with mode: 0644]
drivers/net/qede/base/ecore_status.h [new file with mode: 0644]
drivers/net/qede/base/ecore_utils.h [new file with mode: 0644]
drivers/net/qede/base/eth_common.h [new file with mode: 0644]
drivers/net/qede/base/mcp_public.h [new file with mode: 0644]
drivers/net/qede/base/nvm_cfg.h [new file with mode: 0644]
drivers/net/qede/base/reg_addr.h [new file with mode: 0644]

index 1953ea2..b673cc7 100644 (file)
@@ -332,6 +332,12 @@ M: Rasesh Mody <rasesh.mody@qlogic.com>
 F: drivers/net/bnx2x/
 F: doc/guides/nics/bnx2x.rst
 
+QLogic qede PMD
+M: Harish Patil <harish.patil@qlogic.com>
+M: Rasesh Mody <rasesh.mody@qlogic.com>
+M: Sony Chacko <sony.chacko@qlogic.com>
+F: drivers/net/qede/
+
 RedHat virtio
 M: Huawei Xie <huawei.xie@intel.com>
 M: Yuanhan Liu <yuanhan.liu@linux.intel.com>
diff --git a/drivers/net/qede/LICENSE.qede_pmd b/drivers/net/qede/LICENSE.qede_pmd
new file mode 100644 (file)
index 0000000..c7cbdcc
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of QLogic Corporation nor the name of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written consent.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
new file mode 100644 (file)
index 0000000..febd41d
--- /dev/null
@@ -0,0 +1,81 @@
#    Copyright (c) 2016 QLogic Corporation.
#    All rights reserved.
#    www.qlogic.com
#
#    See LICENSE.qede_pmd for copyright and licensing details.

include $(RTE_SDK)/mk/rte.vars.mk

CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)

#
# OS
#
OS_TYPE := $(shell uname -s)

#
# CFLAGS
#
CFLAGS_BASE_DRIVER = -Wno-unused-parameter
CFLAGS_BASE_DRIVER += -Wno-unused-value
CFLAGS_BASE_DRIVER += -Wno-sign-compare
CFLAGS_BASE_DRIVER += -Wno-missing-prototypes
CFLAGS_BASE_DRIVER += -Wno-cast-qual
CFLAGS_BASE_DRIVER += -Wno-unused-function
CFLAGS_BASE_DRIVER += -Wno-unused-variable
CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
ifeq ($(OS_TYPE),Linux)
ifeq ($(shell clang -Wno-shift-negative-value -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
endif
endif

ifneq (,$(filter gcc gcc48,$(CC)))
CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
CFLAGS_BASE_DRIVER += -Wno-missing-declarations
CFLAGS_BASE_DRIVER += -Wno-maybe-uninitialized
CFLAGS_BASE_DRIVER += -Wno-strict-prototypes
else ifeq ($(CC), clang)
CFLAGS_BASE_DRIVER += -Wno-format-extra-args
CFLAGS_BASE_DRIVER += -Wno-visibility
CFLAGS_BASE_DRIVER += -Wno-empty-body
CFLAGS_BASE_DRIVER += -Wno-invalid-source-encoding
CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized
ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion
endif
else
#icc flags
endif

#
# Add extra flags for base ecore driver files
# to disable warnings in them
#
# Fixes vs. original:
# - removed a duplicated -Wno-missing-prototypes entry above
# - append the extra flags per base-driver object (CFLAGS_$(obj)) rather
#   than to the global CFLAGS once per object, which silenced warnings in
#   all sources (not just base/) and grew CFLAGS N times
#
BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))

#
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dev.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_hw.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_cxt.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sp_commands.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_fw_funcs.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_spq.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c

# dependent libs:
DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_net lib/librte_malloc

include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
new file mode 100644 (file)
index 0000000..f46f31b
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <zlib.h>
+
+#include <rte_memzone.h>
+#include <rte_errno.h>
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+
/*
 * Round @n up to the nearest power of two.
 * Returns 0 for n == 0; for any other n, the smallest power of two >= n
 * (wraps to 0 if that power does not fit in an unsigned long).
 */
unsigned long qede_log2_align(unsigned long n)
{
	unsigned long pow2;
	unsigned long shift;

	if (n == 0)
		return 0;

	/* Largest power of two that is <= n. */
	pow2 = 1;
	for (shift = n; shift > 1; shift >>= 1)
		pow2 <<= 1;

	/* Round up if n was not itself a power of two. */
	return (pow2 < n) ? pow2 << 1 : pow2;
}
+
+u32 qede_osal_log2(u32 val)
+{
+       u32 log = 0;
+
+       while (val >>= 1)
+               log++;
+
+       return log;
+}
+
+inline void qede_set_bit(u32 nr, unsigned long *addr)
+{
+       __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+inline void qede_clr_bit(u32 nr, unsigned long *addr)
+{
+       __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+inline bool qede_test_bit(u32 nr, unsigned long *addr)
+{
+       bool res;
+
+       rte_mb();
+       res = ((*addr) & (1UL << nr)) != 0;
+       rte_mb();
+       return res;
+}
+
+static inline u32 qede_ffz(unsigned long word)
+{
+       unsigned long first_zero;
+
+       first_zero = __builtin_ffsl(~word);
+       return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
+}
+
+inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
+{
+       u32 i;
+       u32 nwords = 0;
+       OSAL_BUILD_BUG_ON(!limit);
+       nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
+       for (i = 0; i < nwords; i++)
+               if (~(addr[i] != 0))
+                       break;
+       return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
+}
+
+void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
+                             dma_addr_t *phys, size_t size)
+{
+       const struct rte_memzone *mz;
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       uint32_t core_id = rte_lcore_id();
+       unsigned int socket_id;
+
+       OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
+       snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+                                       (unsigned long)rte_get_timer_cycles());
+       if (core_id == (unsigned int)LCORE_ID_ANY)
+               core_id = 0;
+       socket_id = rte_lcore_to_socket_id(core_id);
+       mz = rte_memzone_reserve_aligned(mz_name, size,
+                                        socket_id, 0, RTE_CACHE_LINE_SIZE);
+       if (!mz) {
+               DP_ERR(p_dev, "Unable to allocate DMA memory "
+                      "of size %zu bytes - %s\n",
+                      size, rte_strerror(rte_errno));
+               *phys = 0;
+               return OSAL_NULL;
+       }
+       *phys = mz->phys_addr;
+       DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
+                  "size=%zu phys=0x%lx virt=%p on socket=%u\n",
+                  mz->len, mz->phys_addr, mz->addr, socket_id);
+       return mz->addr;
+}
+
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
+                                     dma_addr_t *phys, size_t size, int align)
+{
+       const struct rte_memzone *mz;
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       uint32_t core_id = rte_lcore_id();
+       unsigned int socket_id;
+
+       OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
+       snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+                                       (unsigned long)rte_get_timer_cycles());
+       if (core_id == (unsigned int)LCORE_ID_ANY)
+               core_id = 0;
+       socket_id = rte_lcore_to_socket_id(core_id);
+       mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align);
+       if (!mz) {
+               DP_ERR(p_dev, "Unable to allocate DMA memory "
+                      "of size %zu bytes - %s\n",
+                      size, rte_strerror(rte_errno));
+               *phys = 0;
+               return OSAL_NULL;
+       }
+       *phys = mz->phys_addr;
+       DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
+                  "aligned memory size=%zu phys=0x%lx virt=%p core=%d\n",
+                  mz->len, mz->phys_addr, mz->addr, core_id);
+       return mz->addr;
+}
+
/*
 * Decompress @input_len bytes of zlib-compressed firmware data from
 * @input_buf into @unzip_buf (capacity @max_size bytes) using the
 * pre-allocated z_stream hanging off @p_hwfn.
 *
 * Returns the number of 32-bit dwords produced, or 0 on any zlib error.
 */
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
		    u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
	if (rc != Z_OK) {
		DP_ERR(p_hwfn,
			   "zlib init failed, rc = %d\n", rc);
		return 0;
	}

	/* Single-shot inflate; the stream is released regardless of outcome. */
	rc = inflate(p_hwfn->stream, Z_FINISH);
	inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_ERR(p_hwfn,
			   "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
			   rc);
		return 0;
	}

	/* Firmware consumers count in dwords, not bytes. */
	return p_hwfn->stream->total_out / 4;
}
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
new file mode 100644 (file)
index 0000000..f15242d
--- /dev/null
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __BCM_OSAL_H
+#define __BCM_OSAL_H
+
+#include <rte_byteorder.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_memcpy.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+
+/* Forward declaration */
+struct ecore_dev;
+struct ecore_hwfn;
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#undef __BIG_ENDIAN
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN
+#endif
+#else
+#undef __LITTLE_ENDIAN
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN
+#endif
+#endif
+
+/* Memory Types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef int16_t s16;
+typedef int32_t s32;
+
+typedef u16 __le16;
+typedef u32 __le32;
+typedef u32 OSAL_BE32;
+
+#define osal_uintptr_t uintptr_t
+
+typedef phys_addr_t dma_addr_t;
+
+typedef rte_spinlock_t osal_spinlock_t;
+
+typedef void *osal_dpc_t;
+
+typedef size_t osal_size_t;
+
+typedef intptr_t osal_int_ptr_t;
+
+typedef int bool;
+#define true 1
+#define false 0
+
+#define nothing do {} while (0)
+
+/* Delays */
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000 * (x))
+#define OSAL_UDELAY(time) usec_delay(time)
+#define OSAL_MSLEEP(time) msec_delay(time)
+
+/* Memory allocations and deallocations */
+
+#define OSAL_NULL ((void *)0)
+#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0)
+#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0)
+#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0)
+#define OSAL_VALLOC(dev, size) rte_malloc("qede", size, 0)
+#define OSAL_FREE(dev, memory) rte_free((void *)memory)
+#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
+#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
+#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
+#define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size)
+#define OSAL_MEMSET(dst, val, length) \
+       memset(dst, val, length)
+
+void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
+
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
+                                     size_t, int);
+
+#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
+       osal_dma_alloc_coherent(dev, phys, size)
+
+#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
+       osal_dma_alloc_coherent_aligned(dev, phys, size, align)
+
+/* TODO: */
+#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing
+
+/* HW reads/writes */
+
+#define DIRECT_REG_RD(_dev, _reg_addr) \
+       (*((volatile u32 *) (_reg_addr)))
+
+#define REG_RD(_p_hwfn, _reg_offset) \
+       DIRECT_REG_RD(_p_hwfn,          \
+                       ((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset)))
+
+#define DIRECT_REG_WR16(_reg_addr, _val) \
+       (*((volatile u16 *)(_reg_addr)) = _val)
+
+#define DIRECT_REG_WR(_dev, _reg_addr, _val) \
+       (*((volatile u32 *)(_reg_addr)) = _val)
+
+#define REG_WR(_p_hwfn, _reg_offset, _val) \
+       DIRECT_REG_WR(NULL,  \
+       ((u8 *)((uintptr_t)(_p_hwfn->regview)) + (_reg_offset)), (u32)_val)
+
+#define REG_WR16(_p_hwfn, _reg_offset, _val) \
+       DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \
+                       (_reg_offset)), (u16)_val)
+
+#define DOORBELL(_p_hwfn, _db_addr, _val) \
+       DIRECT_REG_WR(_p_hwfn, \
+            ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + (_db_addr)), (u32)_val)
+
+/* Mutexes */
+
+typedef pthread_mutex_t osal_mutex_t;
+#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock)
+#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL)
+#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock)
+#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing
+#define OSAL_MUTEX_DEALLOC(lock) nothing
+
+/* Spinlocks */
+
+#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock)
+#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock)
+#define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock)
+#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing
+#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing
+
+/* DPC */
+
+#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
+#define OSAL_DPC_INIT(dpc, hwfn) nothing
+#define OSAL_POLL_MODE_DPC(hwfn) nothing
+
+/* Lists */
+
+#define OSAL_LIST_SPLICE_INIT(new_list, list) nothing
+#define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) nothing
+
+typedef struct _osal_list_entry_t {
+       struct _osal_list_entry_t *next, *prev;
+} osal_list_entry_t;
+
+typedef struct osal_list_t {
+       osal_list_entry_t *head, *tail;
+       unsigned long cnt;
+} osal_list_t;
+
+#define OSAL_LIST_INIT(list) \
+       do {                    \
+               (list)->head = NULL;  \
+               (list)->tail = NULL;  \
+               (list)->cnt  = 0;       \
+       } while (0)
+
+#define OSAL_LIST_PUSH_HEAD(entry, list)               \
+       do {                                            \
+               (entry)->prev = (osal_list_entry_t *)0;         \
+               (entry)->next = (list)->head;                   \
+               if ((list)->tail == (osal_list_entry_t *)0) {   \
+                       (list)->tail = (entry);                 \
+               } else {                                        \
+                       (list)->head->prev = (entry);           \
+               }                                               \
+               (list)->head = (entry);                         \
+               (list)->cnt++;                                  \
+       } while (0)
+
+#define OSAL_LIST_PUSH_TAIL(entry, list)       \
+       do {                                    \
+               (entry)->next = (osal_list_entry_t *)0; \
+               (entry)->prev = (list)->tail;           \
+               if ((list)->tail) {                     \
+                       (list)->tail->next = (entry);   \
+               } else {                                \
+                       (list)->head = (entry);         \
+               }                                       \
+               (list)->tail = (entry);                 \
+               (list)->cnt++;                          \
+       } while (0)
+
+#define OSAL_LIST_FIRST_ENTRY(list, type, field) \
+       (type *)((list)->head)
+
+#define OSAL_LIST_REMOVE_ENTRY(entry, list)                    \
+       do {                                                    \
+               if ((list)->head == (entry)) {                          \
+                       if ((list)->head) {                             \
+                               (list)->head = (list)->head->next;      \
+                       if ((list)->head) {                             \
+                               (list)->head->prev = (osal_list_entry_t *)0;\
+                       } else {                                        \
+                               (list)->tail = (osal_list_entry_t *)0;  \
+                       }                                               \
+                       (list)->cnt--;                                  \
+                       }                                               \
+               } else if ((list)->tail == (entry)) {                   \
+                       if ((list)->tail) {                             \
+                               (list)->tail = (list)->tail->prev;      \
+                       if ((list)->tail) {                             \
+                               (list)->tail->next = (osal_list_entry_t *)0;\
+                       } else {                                        \
+                               (list)->head = (osal_list_entry_t *)0;  \
+                       }                                               \
+                       (list)->cnt--;                                  \
+                       }                                               \
+               } else {                                                \
+                       (entry)->prev->next = (entry)->next;            \
+                       (entry)->next->prev = (entry)->prev;            \
+                       (list)->cnt--;                                  \
+               }                                                       \
+       } while (0)
+
+#define OSAL_LIST_IS_EMPTY(list) \
+       ((list)->cnt == 0)
+
+#define OSAL_LIST_NEXT(entry, field, type) \
+       (type *)((&((entry)->field))->next)
+
+/* TODO: Check field, type order */
+
+#define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \
+       for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \
+               entry;                                          \
+               entry = OSAL_LIST_NEXT(entry, field, type))
+
+#define OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \
+        for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \
+         tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL;    \
+         entry != NULL;                                                \
+         entry = (type *)tmp_entry,                                     \
+         tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL)
+
+/* TODO: OSAL_LIST_INSERT_ENTRY_AFTER */
+#define OSAL_LIST_INSERT_ENTRY_AFTER(new_entry, entry, list) \
+       OSAL_LIST_PUSH_HEAD(new_entry, list)
+
+/* PCI config space */
+
+#define OSAL_PCI_READ_CONFIG_BYTE(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_WORD(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_DWORD(dev, address, dst) nothing
+#define OSAL_PCI_FIND_EXT_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_FIND_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_WRITE_CONFIG_WORD(dev, address, val) nothing
+#define OSAL_BAR_SIZE(dev, bar_id) 0
+
+/* Barriers */
+
+#define OSAL_MMIOWB(dev)               rte_wmb()
+#define OSAL_BARRIER(dev)              rte_compiler_barrier()
+#define OSAL_SMP_RMB(dev)              rte_rmb()
+#define OSAL_SMP_WMB(dev)              rte_wmb()
+#define OSAL_RMB(dev)                  rte_rmb()
+#define OSAL_WMB(dev)                  rte_wmb()
+#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing
+
+#define OSAL_BITS_PER_BYTE             (8)
+#define OSAL_BITS_PER_UL       (sizeof(unsigned long) * OSAL_BITS_PER_BYTE)
+#define OSAL_BITS_PER_UL_MASK          (OSAL_BITS_PER_UL - 1)
+
+/* Bitops */
+void qede_set_bit(u32, unsigned long *);
+#define OSAL_SET_BIT(bit, bitmap) \
+       qede_set_bit(bit, bitmap)
+
+void qede_clr_bit(u32, unsigned long *);
+#define OSAL_CLEAR_BIT(bit, bitmap) \
+       qede_clr_bit(bit, bitmap)
+
+bool qede_test_bit(u32, unsigned long *);
+#define OSAL_TEST_BIT(bit, bitmap) \
+       qede_test_bit(bit, bitmap)
+
+u32 qede_find_first_zero_bit(unsigned long *, u32);
+#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \
+       qede_find_first_zero_bit(bitmap, length)
+
+#define OSAL_BUILD_BUG_ON(cond)                nothing
+#define ETH_ALEN                       ETHER_ADDR_LEN
+
+#define OSAL_LINK_UPDATE(hwfn) nothing
+
+/* SR-IOV channel */
+
+#define OSAL_VF_FLR_UPDATE(hwfn) nothing
+#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
+#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol)        (0)
+#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
+#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
+#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
+#define OSAL_IOV_VF_CLEANUP(hwfn, vfid) nothing
+#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
+#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) nothing
+#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
+#define OSAL_IOV_GET_OS_TYPE() 0
+
+u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
+                  u8 *input_buf, u32 max_size, u8 *unzip_buf);
+
+#define OSAL_UNZIP_DATA(p_hwfn, input_len, buf, max_size, unzip_buf) \
+       qede_unzip_data(p_hwfn, input_len, buf, max_size, unzip_buf)
+
+/* TODO: */
+#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing
+#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
+
+#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
+#define OSAL_NUM_ACTIVE_CPU()  0
+
+/* Utility functions */
+
+#define RTE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP(size, to_what) RTE_DIV_ROUND_UP(size, to_what)
+#define RTE_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDUP(value, to_what) RTE_ROUNDUP((value), (to_what))
+
+unsigned long qede_log2_align(unsigned long n);
+#define OSAL_ROUNDUP_POW_OF_TWO(val) \
+       qede_log2_align(val)
+
+u32 qede_osal_log2(u32);
+#define OSAL_LOG2(val) \
+       qede_osal_log2(val)
+
/*
 * Console logging helpers.
 *
 * Fix: the original macros expanded to the bare token "printf", discarding
 * both the format string and all arguments (a call site such as
 * PRINT("msg") became the no-op statement "printf;").  Forward the
 * arguments so the message is actually emitted; ##__VA_ARGS__ (GNU
 * extension, used throughout DPDK) swallows the trailing comma when no
 * variadic arguments are given.
 */
#define PRINT(format, ...) printf(format, ##__VA_ARGS__)
#define PRINT_ERR(format, ...) PRINT(format, ##__VA_ARGS__)
+
+#define OFFSETOF(str, field) __builtin_offsetof(str, field)
+#define OSAL_ASSERT(is_assert) assert(is_assert)
+#define OSAL_BEFORE_PF_START(file, engine) nothing
+#define OSAL_AFTER_PF_STOP(file, engine) nothing
+
+/* Endian macros */
+#define OSAL_CPU_TO_BE32(val) rte_cpu_to_be_32(val)
+#define OSAL_BE32_TO_CPU(val) rte_be_to_cpu_32(val)
+#define OSAL_CPU_TO_LE32(val) rte_cpu_to_le_32(val)
+#define OSAL_CPU_TO_LE16(val) rte_cpu_to_le_16(val)
+#define OSAL_LE32_TO_CPU(val) rte_le_to_cpu_32(val)
+#define OSAL_LE16_TO_CPU(val) rte_le_to_cpu_16(val)
+#define OSAL_CPU_TO_BE64(val) rte_cpu_to_be_64(val)
+
+#define OSAL_ARRAY_SIZE(arr) RTE_DIM(arr)
+#define OSAL_SPRINTF(name, pattern, ...) \
+       sprintf(name, pattern, ##__VA_ARGS__)
+#define OSAL_STRLEN(string) strlen(string)
+#define OSAL_STRCPY(dst, string) strcpy(dst, string)
+#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
+#define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
+
+#define OSAL_INLINE inline
+#define OSAL_REG_ADDR(_p_hwfn, _offset) \
+               (void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset))
+#define OSAL_PAGE_SIZE 4096
+#define OSAL_IOMEM volatile
+#define OSAL_UNLIKELY(x)  __builtin_expect(!!(x), 0)
+#define OSAL_MIN_T(type, __min1, __min2)       \
+       ((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
+#define OSAL_MAX_T(type, __max1, __max2)       \
+       ((type)(__max1) > (type)(__max2) ? (type)(__max1) : (type)(__max2))
+
+#define        OSAL_GET_PROTOCOL_STATS(p_hwfn, type, stats) (0)
+#define        OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
+
+#endif /* __BCM_OSAL_H */
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
new file mode 100644 (file)
index 0000000..295a41f
--- /dev/null
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+
+#define CORE_SPQE_PAGE_SIZE_BYTES                       4096
+
+#define FW_MAJOR_VERSION       8
+#define FW_MINOR_VERSION       7
+#define FW_REVISION_VERSION    7
+#define FW_ENGINEERING_VERSION 0
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_K2       (4)
+#define MAX_NUM_PORTS_BB       (2)
+#define MAX_NUM_PORTS          (MAX_NUM_PORTS_K2)
+
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS    (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS    (MAX_NUM_VFS_K2)
+
+#define MAX_NUM_FUNCTIONS_BB   (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS      (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER    (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_NUM_VPORTS_K2      (208)
+#define MAX_NUM_VPORTS_BB      (160)
+#define MAX_NUM_VPORTS         (MAX_NUM_VPORTS_K2)
+
+#define MAX_NUM_L2_QUEUES_K2   (320)
+#define MAX_NUM_L2_QUEUES_BB   (256)
+#define MAX_NUM_L2_QUEUES      (MAX_NUM_L2_QUEUES_K2)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+#define NUM_PHYS_TCS_4PORT_K2  (4)
+#define NUM_OF_PHYS_TCS                (8)
+
+#define NUM_TCS_4PORT_K2       (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_OF_TCS             (NUM_OF_PHYS_TCS + 1)
+
+#define LB_TC                  (NUM_OF_PHYS_TCS)
+
+/* Num of possible traffic priority values */
+#define NUM_OF_PRIO            (8)
+
+#define MAX_NUM_VOQS_K2                (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
+#define MAX_NUM_VOQS_BB                (NUM_OF_TCS * MAX_NUM_PORTS_BB)
+#define MAX_NUM_VOQS           (MAX_NUM_VOQS_K2)
+#define MAX_PHYS_VOQS          (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+
+/* CIDs */
+#define NUM_OF_CONNECTION_TYPES        (8)
+#define NUM_OF_LCIDS           (320)
+#define NUM_OF_LTIDS           (320)
+
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
+
+/*****************/
+/* DQ CONSTANTS  */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY                 0
+
+/* XCM agg val selection */
+#define DQ_XCM_AGG_VAL_SEL_WORD2  0
+#define DQ_XCM_AGG_VAL_SEL_WORD3  1
+#define DQ_XCM_AGG_VAL_SEL_WORD4  2
+#define DQ_XCM_AGG_VAL_SEL_WORD5  3
+#define DQ_XCM_AGG_VAL_SEL_REG3   4
+#define DQ_XCM_AGG_VAL_SEL_REG4   5
+#define DQ_XCM_AGG_VAL_SEL_REG5   6
+#define DQ_XCM_AGG_VAL_SEL_REG6   7
+
+/* XCM agg val selection */
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_CONS_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_TX_BD_PROD_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD            DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14  0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15  1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12   2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13   3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18   4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19   5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22   6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23   7
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_ETH_DQ_CF_CMD           (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_DQ_CF_CMD          (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD       (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_TERMINATE_CMD      (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD       (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_CORE_SLOW_PATH_CMD      (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD          (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/*****************/
+/* QM CONSTANTS  */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2    512
+#define MAX_QM_TX_QUEUES_BB    448
+#define MAX_QM_TX_QUEUES       MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB 64
+#define MAX_QM_OTHER_QUEUES_K2 128
+#define MAX_QM_OTHER_QUEUES    MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE 8
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE  0x200
+
+/* QM registers data */
+#define QM_LINE_CRD_REG_WIDTH          16
+#define QM_LINE_CRD_REG_SIGN_BIT       (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH          24
+#define QM_BYTE_CRD_REG_SIGN_BIT       (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH           32
+#define QM_WFQ_CRD_REG_SIGN_BIT                (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH            32
+#define QM_RL_CRD_REG_SIGN_BIT         (1 << (QM_RL_CRD_REG_WIDTH - 1))
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX  0
+#define CAU_FSM_ETH_TX  1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB    12
+
+#define CAU_HC_STOPPED_STATE   3
+#define CAU_HC_DISABLE_STATE   4
+#define CAU_HC_ENABLE_STATE    0
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2     (368)
+#define MAX_SB_PER_PATH_BB     (288)
+#define MAX_TOT_SB_PER_PATH \
+       MAX_SB_PER_PATH_K2
+
+#define MAX_SB_PER_PF_MIMD     129
+#define MAX_SB_PER_PF_SIMD     64
+#define MAX_SB_PER_VF          64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE                   0x0000
+
+#define IGU_MEM_MSIX_BASE              0x0000
+#define IGU_MEM_MSIX_UPPER             0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER    0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE          0x0200
+#define IGU_MEM_PBA_MSIX_UPPER         0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER        0x03ff
+
+#define IGU_CMD_INT_ACK_BASE           0x0400
+#define IGU_CMD_INT_ACK_UPPER          (IGU_CMD_INT_ACK_BASE + \
+                                        MAX_TOT_SB_PER_PATH -  \
+                                        1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER     0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER     0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER     0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER          0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER      0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER      0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER         0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE                  0x0600
+#define IGU_CMD_PROD_UPD_UPPER                 (IGU_CMD_PROD_UPD_BASE +\
+                                                MAX_TOT_SB_PER_PATH - \
+                                                1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER                0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* PTT and GTT */
+#define PXP_NUM_PF_WINDOWS             12
+#define PXP_PER_PF_ENTRY_SIZE          8
+#define PXP_NUM_GLOBAL_WINDOWS         243
+#define PXP_GLOBAL_ENTRY_SIZE          4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH        4
+#define PXP_PF_WINDOW_ADMIN_START      0
+#define PXP_PF_WINDOW_ADMIN_LENGTH     0x1000
+#define PXP_PF_WINDOW_ADMIN_END                (PXP_PF_WINDOW_ADMIN_START + \
+                                        PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START       0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH      (PXP_NUM_PF_WINDOWS * \
+                                                PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+                                        PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START       0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH      (PXP_NUM_GLOBAL_WINDOWS * \
+                                                PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+               (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+                PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR     0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR     0xf4
+#define PXP_PF_ME_OPAQUE_ADDR          0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR                0x1fc
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START       0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM         PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+        PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+        PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM             PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE     0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+       (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+        PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+       (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+        PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER        1024
+
+/* ILT Records */
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+/******************/
+/* PBF CONSTANTS  */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES 3328
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+/* Async data KCQ CQE.
+ * All __le16/__le32 fields in this file are little-endian as seen by the
+ * firmware; <NAME>_MASK / <NAME>_SHIFT defines that follow a field decode
+ * sub-fields of that field.
+ */
+struct async_data {
+       __le32  cid;
+       __le16  itid;
+       u8      error_code;
+       u8      fw_debug_param;
+};
+
+/* Generic 64-bit quantity carried as two little-endian 32-bit words */
+struct regpair {
+       __le32 lo /* low word for reg-pair */;
+       __le32 hi /* high word for reg-pair */;
+};
+
+/* Event ring payload for the VF-PF channel */
+struct vf_pf_channel_eqe_data {
+       struct regpair msg_addr /* VF-PF message address */;
+};
+
+/* Event ring payload for iSCSI events */
+struct iscsi_eqe_data {
+       __le32 cid /* Context ID of the connection */;
+       __le16 conn_id
+           /* Task Id of the task (for error that happened on a task) */;
+       u8 error_code;
+       u8 reserved0;
+};
+
+/*
+ * Event Ring malicious VF data
+ */
+struct malicious_vf_eqe_data {
+       u8 vf_id /* Malicious VF ID */; /* WARNING:CAMELCASE */
+       u8 err_id /* Malicious VF error */;
+       __le16 reserved[3];
+};
+
+/*
+ * Event Ring initial cleanup data
+ */
+struct initial_cleanup_eqe_data {
+       u8 vf_id /* VF ID */; /* WARNING:CAMELCASE */
+       u8 reserved[7];
+};
+
+
+/* Union of the event-specific payloads; 8 bytes in every arm */
+union event_ring_data {
+       u8 bytes[8] /* Byte Array */;
+       struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
+       struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
+       struct regpair roce_handle /* WARNING:CAMELCASE */
+           /* Dedicated field for RoCE affiliated asynchronous error */;
+       struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
+       struct initial_cleanup_eqe_data vf_init_cleanup
+           /* VF Initial Cleanup data */;
+};
+/* Event Ring Entry */
+struct event_ring_entry {
+       u8                      protocol_id;
+       u8                      opcode;
+       __le16                  reserved0;
+       __le16                  echo;
+       u8                      fw_return_code;
+       u8                      flags;
+/* Sub-fields of 'flags': bit 0 = async, bits 1-7 reserved */
+#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
+#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+       union event_ring_data   data;
+};
+
+/* Multi function mode.
+ * SF = single function; MF_* presumably select the OVLAN/NPAR
+ * partitioning schemes — confirm against MFW documentation.
+ */
+enum mf_mode {
+       SF,
+       MF_OVLAN,
+       MF_NPAR,
+       MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+       PROTOCOLID_ISCSI /* iSCSI */,
+       PROTOCOLID_FCOE /* FCoE */,
+       PROTOCOLID_ROCE /* RoCE */,
+       PROTOCOLID_CORE /* Core (light L2, slow path core) */,
+       PROTOCOLID_ETH /* Ethernet */,
+       PROTOCOLID_IWARP /* iWARP */,
+       PROTOCOLID_TOE /* TOE */,
+       PROTOCOLID_PREROCE /* Pre (tapeout) RoCE */,
+       PROTOCOLID_COMMON /* ProtocolCommon */,
+       PROTOCOLID_TCP /* TCP */,
+       MAX_PROTOCOL_TYPE
+};
+
+/* CAU protocol-index (PI) entry; sub-fields of 'prod' follow */
+struct cau_pi_entry {
+       u32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT   0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK  0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT    23
+#define CAU_PI_ENTRY_RESERVED_MASK    0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT   24
+};
+
+/* CAU status-block entry: 'data' and 'params' are each decoded by the
+ * mask/shift pairs immediately following them.
+ */
+struct cau_sb_entry {
+       u32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT     0
+#define CAU_SB_ENTRY_STATE0_MASK       0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT      24
+#define CAU_SB_ENTRY_STATE1_MASK       0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT      28
+       u32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT   18
+#define CAU_SB_ENTRY_VF_VALID_MASK     0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT    26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
+#define CAU_SB_ENTRY_TPH_MASK          0x1
+#define CAU_SB_ENTRY_TPH_SHIFT         31
+};
+
+/* core doorbell data */
+struct core_db_data {
+       u8 params;
+#define CORE_DB_DATA_DEST_MASK         0x3
+#define CORE_DB_DATA_DEST_SHIFT        0
+#define CORE_DB_DATA_AGG_CMD_MASK      0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT     2
+#define CORE_DB_DATA_BYPASS_EN_MASK    0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT   4
+#define CORE_DB_DATA_RESERVED_MASK     0x1
+#define CORE_DB_DATA_RESERVED_SHIFT    5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8      agg_flags;
+       __le16  spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection
+ * (values match the CORE_DB_DATA_AGG_CMD field above)
+ */
+enum db_agg_cmd_sel {
+       DB_AGG_CMD_NOP,
+       DB_AGG_CMD_SET,
+       DB_AGG_CMD_ADD,
+       DB_AGG_CMD_MAX,
+       MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination (CM block the doorbell is routed to) */
+enum db_dest {
+       DB_DEST_XCM,
+       DB_DEST_UCM,
+       DB_DEST_TCM,
+       DB_NUM_DESTINATIONS,
+       MAX_DB_DEST
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+       __le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK       0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT      2
+#define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT      5
+};
+
+/* Igu interrupt command */
+enum igu_int_cmd {
+       IGU_INT_ENABLE  = 0,
+       IGU_INT_DISABLE = 1,
+       IGU_INT_NOP     = 2,
+       IGU_INT_NOP2    = 3,
+       MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command.
+ * Sub-fields of 'sb_id_and_flags' are decoded by the mask/shift pairs
+ * below; 'reserved1' pads the command to 8 bytes.
+ */
+struct igu_prod_cons_update {
+       u32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK        0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT       0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK     0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT    24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK      0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT     25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK      0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT     28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK       0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT      29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK    0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT   31
+       u32 reserved1;
+};
+
+/* Igu segments access for default status block only */
+enum igu_seg_access {
+       IGU_SEG_ACCESS_REG      = 0,
+       IGU_SEG_ACCESS_ATTN     = 1,
+       MAX_IGU_SEG_ACCESS
+};
+
+/* Parser result / error flags reported per packet; all sub-fields of
+ * 'flags' are single bits except L3TYPE and L4PROTOCOL (2 bits each).
+ */
+struct parsing_and_err_flags {
+       __le16 flags;
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK        0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT        12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
+};
+
+/* Concrete Function ID: PF number, port, engine path and (if VFVALID)
+ * the VF number, packed into one 16-bit field.
+ */
+struct pxp_concrete_fid {
+       __le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK     0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT    0
+#define PXP_CONCRETE_FID_PORT_MASK     0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT    4
+#define PXP_CONCRETE_FID_PATH_MASK     0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT    6
+#define PXP_CONCRETE_FID_VFVALID_MASK  0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK     0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT    8
+};
+
+/* Concrete FID layout as used inside a pretend command */
+struct pxp_pretend_concrete_fid {
+       __le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT     0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK  0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK   0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT  7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK      0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT     8
+};
+
+/* A pretend FID is either concrete (decoded above) or opaque;
+ * IS_CONCRETE in the command's control word selects which.
+ */
+union pxp_pretend_fid {
+       struct pxp_pretend_concrete_fid concrete_fid;
+       __le16                          opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+       union pxp_pretend_fid   fid;
+       __le16                  control;
+#define PXP_PRETEND_CMD_PATH_MASK              0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT             0
+#define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT         1
+#define PXP_PRETEND_CMD_PORT_MASK              0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT             2
+#define PXP_PRETEND_CMD_RESERVED0_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT        4
+#define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT        8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+       __le32                  offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK     0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT    0
+#define PXP_PTT_ENTRY_RESERVED0_MASK  0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+       struct pxp_pretend_cmd  pretend;
+};
+
+/* RSS hash type */
+enum rss_hash_type {
+       RSS_HASH_TYPE_DEFAULT   = 0,
+       RSS_HASH_TYPE_IPV4      = 1,
+       RSS_HASH_TYPE_TCP_IPV4  = 2,
+       RSS_HASH_TYPE_IPV6      = 3,
+       RSS_HASH_TYPE_TCP_IPV6  = 4,
+       RSS_HASH_TYPE_UDP_IPV4  = 5,
+       RSS_HASH_TYPE_UDP_IPV6  = 6,
+       MAX_RSS_HASH_TYPE
+};
+
+/* Status block: PIS_PER_SB (12) protocol-index entries followed by the
+ * SB number and the producer index, each with mask/shift sub-fields.
+ */
+struct status_block {
+       __le16  pi_array[PIS_PER_SB];
+       __le32  sb_num;
+#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT     0
+#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
+#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+       __le32 prod_index;
+#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+};
+
+/* @DPDK */
+#define X_FINAL_CLEANUP_AGG_INT  1
+#define SDM_COMP_TYPE_AGG_INT 2
+#define MAX_NUM_LL2_RX_QUEUES 32
+#define QM_PQ_ELEMENT_SIZE 4
+/* NOTE(review): also defined with the same value (0) in the VF BAR
+ * section below; the identical redefinition is legal but redundant.
+ */
+#define PXP_VF_BAR0_START_IGU 0
+#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3
+
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 16
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 0
+#define YSTORM_QZONE_SIZE 8
+#define PSTORM_QZONE_SIZE 0
+
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_GRC          0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH         0x200
+#define PXP_VF_BAR0_END_GRC \
+(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_IGU          0
+#define PXP_VF_BAR0_IGU_LENGTH         0x3000
+#define PXP_VF_BAR0_END_IGU \
+(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ           0x3000
+#define PXP_VF_BAR0_DQ_LENGTH          0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET    0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
+(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
+#define PXP_VF_BAR0_END_DQ \
+(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B   0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B   0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B \
+(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B   0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B \
+(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B   0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B \
+(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B   0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B \
+(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B   0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B \
+(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B   0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B \
+(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A    0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A      0x10000
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH   32
+
+/* NOTE(review): duplicates of the identical definitions in the PXP
+ * CONSTANTS section earlier in this file; benign (values match) but
+ * one copy could be dropped.
+ */
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN  12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
+#endif /* __COMMON_HSI__ */
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
new file mode 100644 (file)
index 0000000..32a3de9
--- /dev/null
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_H
+#define __ECORE_H
+
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_tools.h"
+#include "ecore_proto_if.h"
+#include "mcp_public.h"
+
+#define MAX_HWFNS_PER_DEVICE   (4)
+#define NAME_SIZE 64           /* @DPDK */
+#define VER_SIZE 16
+/* @DPDK ARRAY_DECL */
+#define ECORE_WFQ_UNIT 100
+#include "../qede_logs.h"      /* @DPDK */
+
+/* Constants */
+#define ECORE_WID_SIZE         (1024)
+
+/* Configurable */
+#define ECORE_PF_DEMS_SIZE     (4)
+
+/* cau states (interrupt coalescing on/off) */
+enum ecore_coalescing_mode {
+       ECORE_COAL_MODE_DISABLE,
+       ECORE_COAL_MODE_ENABLE
+};
+
+/* NVM/PHY access commands; each value maps 1:1 onto the corresponding
+ * DRV_MSG_CODE_* mailbox code from mcp_public.h, except the sentinel
+ * ECORE_GET_MCP_NVM_RESP.
+ */
+enum ecore_nvm_cmd {
+       ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
+       ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
+       ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
+       ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
+       ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
+       ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
+       ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
+       ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
+       ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
+       ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
+       ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
+};
+
+#ifndef LINUX_REMOVE
+#if !defined(CONFIG_ECORE_L2)
+#define CONFIG_ECORE_L2
+#endif
+#endif
+
+/* helpers */
+#ifndef __EXTRACT__LINUX__
+#define MASK_FIELD(_name, _value)                                      \
+               ((_value) &= (_name##_MASK))
+
+#define FIELD_VALUE(_name, _value)                                     \
+               ((_value & _name##_MASK) << _name##_SHIFT)
+
+#define SET_FIELD(value, name, flag)                                   \
+do {                                                                   \
+       (value) &= ~(name##_MASK << name##_SHIFT);                      \
+       (value) |= (((u64)flag) << (name##_SHIFT));                     \
+} while (0)
+
+#define GET_FIELD(value, name)                                         \
+       (((value) >> (name##_SHIFT)) & name##_MASK)
+#endif
+
+/* Compute the legacy doorbell address for a connection: the DEMS
+ * selector field OR'ed with the CID-based offset (each CID owns
+ * ECORE_PF_DEMS_SIZE bytes of doorbell space).
+ */
+static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
+{
+       return FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+              (cid * ECORE_PF_DEMS_SIZE);
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)                             \
+       ((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
+        ~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
+
+#ifndef U64_HI
+#define U64_HI(val) ((u32)(((u64)(val))  >> 32))
+#endif
+
+#ifndef U64_LO
+#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+#endif
+
+#ifndef __EXTRACT__LINUX__
+/* Debug print severity levels */
+enum DP_LEVEL {
+       ECORE_LEVEL_VERBOSE = 0x0,
+       ECORE_LEVEL_INFO = 0x1,
+       ECORE_LEVEL_NOTICE = 0x2,
+       ECORE_LEVEL_ERR = 0x3,
+};
+
+/* The 32-bit debug word is split: bits 0-29 are per-module verbosity
+ * flags (DP_MODULE below), bits 30-31 carry the DP_LEVEL.
+ */
+#define ECORE_LOG_LEVEL_SHIFT  (30)
+#define ECORE_LOG_VERBOSE_MASK (0x3fffffff)
+#define ECORE_LOG_INFO_MASK    (0x40000000)
+#define ECORE_LOG_NOTICE_MASK  (0x80000000)
+
+/* Per-module verbose-logging flags; OR together to enable categories */
+enum DP_MODULE {
+#ifndef LINUX_REMOVE
+       ECORE_MSG_DRV = 0x0001,
+       ECORE_MSG_PROBE = 0x0002,
+       ECORE_MSG_LINK = 0x0004,
+       ECORE_MSG_TIMER = 0x0008,
+       ECORE_MSG_IFDOWN = 0x0010,
+       ECORE_MSG_IFUP = 0x0020,
+       ECORE_MSG_RX_ERR = 0x0040,
+       ECORE_MSG_TX_ERR = 0x0080,
+       ECORE_MSG_TX_QUEUED = 0x0100,
+       ECORE_MSG_INTR = 0x0200,
+       ECORE_MSG_TX_DONE = 0x0400,
+       ECORE_MSG_RX_STATUS = 0x0800,
+       ECORE_MSG_PKTDATA = 0x1000,
+       ECORE_MSG_HW = 0x2000,
+       ECORE_MSG_WOL = 0x4000,
+#endif
+       ECORE_MSG_SPQ = 0x10000,
+       ECORE_MSG_STATS = 0x20000,
+       ECORE_MSG_DCB = 0x40000,
+       ECORE_MSG_IOV = 0x80000,
+       ECORE_MSG_SP = 0x100000,
+       ECORE_MSG_STORAGE = 0x200000,
+       ECORE_MSG_CXT = 0x800000,
+       ECORE_MSG_ILT = 0x2000000,
+       ECORE_MSG_DEBUG = 0x8000000,
+       /* to be added...up to 0x8000000 */
+};
+#endif
+
+#define for_each_hwfn(p_dev, i)        for (i = 0; i < p_dev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+       (val == (cond1) ? true1 : \
+        (val == (cond2) ? true2 : def))
+
+/* forward */
+struct ecore_ptt_pool;
+struct ecore_spq;
+struct ecore_sb_info;
+struct ecore_sb_attn_info;
+struct ecore_cxt_mngr;
+struct ecore_dma_mem;
+struct ecore_sb_sp_info;
+struct ecore_igu_info;
+struct ecore_mcp_info;
+
+/* Runtime (RT) array shadow: per-entry init value plus a validity flag */
+struct ecore_rt_data {
+       u32 *init_val;
+       bool *b_valid;
+};
+
+/* Tunnel modes; used as bit positions in the tunn_mode bitmaps below */
+enum ecore_tunn_mode {
+       ECORE_MODE_L2GENEVE_TUNN,
+       ECORE_MODE_IPGENEVE_TUNN,
+       ECORE_MODE_L2GRE_TUNN,
+       ECORE_MODE_IPGRE_TUNN,
+       ECORE_MODE_VXLAN_TUNN,
+};
+
+/* Tunnel classification schemes */
+enum ecore_tunn_clss {
+       ECORE_TUNN_CLSS_MAC_VLAN,
+       ECORE_TUNN_CLSS_MAC_VNI,
+       ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+       ECORE_TUNN_CLSS_INNER_MAC_VNI,
+       MAX_ECORE_TUNN_CLSS,
+};
+
+/* Tunnel configuration applied at PF start: enabled modes, UDP ports
+ * for VXLAN/GENEVE, and a classification scheme per tunnel type.
+ */
+struct ecore_tunn_start_params {
+       unsigned long tunn_mode;
+       u16 vxlan_udp_port;
+       u16 geneve_udp_port;
+       u8 update_vxlan_udp_port;
+       u8 update_geneve_udp_port;
+       u8 tunn_clss_vxlan;
+       u8 tunn_clss_l2geneve;
+       u8 tunn_clss_ipgeneve;
+       u8 tunn_clss_l2gre;
+       u8 tunn_clss_ipgre;
+};
+
+/* Runtime tunnel reconfiguration: same fields as the start params plus
+ * a mask of which modes to update and RX/TX classification toggles.
+ */
+struct ecore_tunn_update_params {
+       unsigned long tunn_mode_update_mask;
+       unsigned long tunn_mode;
+       u16 vxlan_udp_port;
+       u16 geneve_udp_port;
+       u8 update_rx_pf_clss;
+       u8 update_tx_pf_clss;
+       u8 update_vxlan_udp_port;
+       u8 update_geneve_udp_port;
+       u8 tunn_clss_vxlan;
+       u8 tunn_clss_l2geneve;
+       u8 tunn_clss_ipgeneve;
+       u8 tunn_clss_l2gre;
+       u8 tunn_clss_ipgre;
+};
+
+/* Snapshot of the device's SR-IOV PCI capability plus driver-side
+ * VF bookkeeping.
+ */
+struct ecore_hw_sriov_info {
+       /* standard SRIOV capability fields, mostly for debugging */
+       int pos;                /* capability position */
+       int nres;               /* number of resources */
+       u32 cap;                /* SR-IOV Capabilities */
+       u16 ctrl;               /* SR-IOV Control */
+       u16 total_vfs;          /* total VFs associated with the PF */
+       u16 num_vfs;            /* number of vfs that have been started */
+       /* 3 x 64 bits covers the 192 VFs of MAX_NUM_VFS_K2 */
+       u64 active_vfs[3];      /* bitfield of active vfs */
+#define ECORE_IS_VF_ACTIVE(_p_dev, _rel_vf_id) \
+               (!!(_p_dev->sriov_info.active_vfs[_rel_vf_id / 64] & \
+                   (1ULL << (_rel_vf_id % 64))))
+       u16 initial_vfs;        /* initial VFs associated with the PF */
+       u16 nr_virtfn;          /* number of VFs available */
+       u16 offset;             /* first VF Routing ID offset */
+       u16 stride;             /* following VF stride */
+       u16 vf_device_id;       /* VF device id */
+       u32 pgsz;               /* page size for BAR alignment */
+       u8 link;                /* Function Dependency Link */
+
+       bool b_hw_channel;      /* Whether PF uses the HW-channel */
+};
+
+/* The PCI personality is not quite synonymous to protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may support also the RoCE protocol
+ */
+enum ecore_pci_personality {
+       ECORE_PCI_ETH,          /* Ethernet personality */
+       ECORE_PCI_DEFAULT       /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct ecore_qm_iids {
+       u32 cids;               /* connection ids */
+       u32 vf_cids;            /* connection ids belonging to VFs */
+       u32 tids;               /* task ids */
+};
+
+#define MAX_PF_PER_PORT 8
+
+/*@@@TBD MK RESC: need to remove and use MCP interface instead */
+/* HW / FW resources, output of features supported below, most information
+ * is received from MFW.
+ */
+enum ECORE_RESOURCES {
+       ECORE_SB,               /* status blocks */
+       ECORE_L2_QUEUE,
+       ECORE_VPORT,
+       ECORE_RSS_ENG,          /* RSS engines */
+       ECORE_PQ,               /* physical queues */
+       ECORE_RL,               /* rate limiters */
+       ECORE_MAC,
+       ECORE_VLAN,
+       ECORE_ILT,
+       ECORE_CMDQS_CQS,
+       ECORE_MAX_RESC,
+};
+
+/* Features that require resources, given as input to the resource management
+ * algorithm, the output are the resources above
+ */
+enum ECORE_FEATURE {
+       ECORE_PF_L2_QUE,
+       ECORE_PF_TC,
+       ECORE_VF,
+       ECORE_EXTRA_VF_QUE,
+       ECORE_VMQ,
+       ECORE_MAX_FEATURES,
+};
+
+/* Possible physical port/lane configurations */
+enum ECORE_PORT_MODE {
+       ECORE_PORT_MODE_DE_2X40G,
+       ECORE_PORT_MODE_DE_2X50G,
+       ECORE_PORT_MODE_DE_1X100G,
+       ECORE_PORT_MODE_DE_4X10G_F,
+       ECORE_PORT_MODE_DE_4X10G_E,
+       ECORE_PORT_MODE_DE_4X20G,
+       ECORE_PORT_MODE_DE_1X40G,
+       ECORE_PORT_MODE_DE_2X25G,
+       ECORE_PORT_MODE_DE_1X25G
+};
+
+/* Device capabilities (stored as bits in device_capabilities) */
+enum ecore_dev_cap {
+       ECORE_DEV_CAP_ETH,
+};
+
+#ifndef __EXTRACT__LINUX__
+/* Classes of HW errors that can be reported to the upper layer */
+enum ecore_hw_err_type {
+       ECORE_HW_ERR_FAN_FAIL,
+       ECORE_HW_ERR_MFW_RESP_FAIL,
+       ECORE_HW_ERR_HW_ATTN,
+       ECORE_HW_ERR_DMAE_FAIL,
+       ECORE_HW_ERR_RAMROD_FAIL,
+       ECORE_HW_ERR_FW_ASSERT,
+};
+#endif
+
+/* Per HW-function hardware information and resource-allocation results */
+struct ecore_hw_info {
+       /* PCI personality */
+       enum ecore_pci_personality personality;
+
+       /* Resource Allocation scheme results */
+       u32 resc_start[ECORE_MAX_RESC];         /* first resource index */
+       u32 resc_num[ECORE_MAX_RESC];           /* number of resources */
+       u32 feat_num[ECORE_MAX_FEATURES];       /* per-feature counts */
+
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+                                        RESC_NUM(_p_hwfn, resc))
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+       /* Traffic-class configuration */
+       u8 num_tc;
+       u8 ooo_tc;
+       u8 offload_tc;
+       u8 non_offload_tc;
+
+       u32 concrete_fid;
+       u16 opaque_fid;
+       u16 ovlan;              /* outer vlan - presumably MF-mode related;
+                                * confirm against MF flows
+                                */
+       u32 part_num[4];
+
+       unsigned char hw_mac_addr[ETH_ALEN];
+
+       struct ecore_igu_info *p_igu_info;
+       /* Sriov */
+       u32 first_vf_in_pf;
+       u8 max_chains_per_vf;
+
+       u32 port_mode;          /* one of enum ECORE_PORT_MODE */
+       u32 hw_mode;
+       unsigned long device_capabilities;      /* enum ecore_dev_cap bits */
+};
+
+/* Per-queue connection-id bookkeeping (see p_tx_cids/p_rx_cids in
+ * struct ecore_hwfn).
+ */
+struct ecore_hw_cid_data {
+       u32 cid;
+       bool b_cid_allocated;
+       u8 vfid;                /* 1-based; 0 signals this is for a PF */
+
+       /* Additional identifiers */
+       u16 opaque_fid;
+       u8 vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE       0x2000
+
+/* Per HW-function DMAE engine context; concurrent users are serialized
+ * by 'mutex'.
+ */
+struct ecore_dmae_info {
+       /* Mutex for synchronizing access to functions */
+       osal_mutex_t mutex;
+
+       u8 channel;
+
+       dma_addr_t completion_word_phys_addr;
+
+       /* The memory location where the DMAE writes the completion
+        * value when an operation is finished on this context.
+        */
+       u32 *p_completion_word;
+
+       dma_addr_t intermediate_buffer_phys_addr;
+
+       /* An intermediate buffer for DMAE operations that use virtual
+        * addresses - data is DMA'd to/from this buffer and then
+        * memcpy'd to/from the virtual address
+        */
+       u32 *p_intermediate_buffer;
+
+       /* The DMAE command descriptor itself (virtual + DMA address) */
+       dma_addr_t dmae_cmd_phys_addr;
+       struct dmae_cmd *p_dmae_cmd;
+};
+
+/* Per-vport WFQ (weighted fair queueing) minimum-rate bookkeeping */
+struct ecore_wfq_data {
+       u32 default_min_speed;  /* When wfq feature is not configured */
+       u32 min_speed;          /* when feature is configured for any 1 vport */
+       bool configured;
+};
+
+/* Queue-manager configuration: PQ/vport/port parameter arrays plus the
+ * index layout of the special-purpose PQs.
+ */
+struct ecore_qm_info {
+       struct init_qm_pq_params *qm_pq_params;
+       struct init_qm_vport_params *qm_vport_params;
+       struct init_qm_port_params *qm_port_params;
+       u16 start_pq;
+       u8 start_vport;
+       u8 pure_lb_pq;          /* pure loopback PQ index */
+       u8 offload_pq;
+       u8 pure_ack_pq;
+       u8 ooo_pq;              /* out-of-order PQ index */
+       u8 vf_queues_offset;
+       u16 num_pqs;
+       u16 num_vf_pqs;
+       u8 num_vports;
+       u8 max_phys_tcs_per_port;
+       bool pf_rl_en;          /* PF rate-limit enabled */
+       bool pf_wfq_en;         /* PF WFQ enabled */
+       bool vport_rl_en;
+       bool vport_wfq_en;
+       u8 pf_wfq;
+       u32 pf_rl;
+       struct ecore_wfq_data *wfq_data;
+};
+
+/* Address/length pair describing a storm statistics region */
+struct storm_stats {
+       u32 address;
+       u32 len;
+};
+
+/* Firmware is consumed as a binary image and may be compressed */
+#define CONFIG_ECORE_BINARY_FW
+#define CONFIG_ECORE_ZIPPED_FW
+
+/* Pointers into the parsed firmware image, consumed by the init code */
+struct ecore_fw_data {
+#ifdef CONFIG_ECORE_BINARY_FW
+       struct fw_ver_info *fw_ver_info;
+#endif
+       const u8 *modes_tree_buf;
+       union init_op *init_ops;
+       const u32 *arr_data;
+       u32 init_ops_size;
+};
+
+/* Per HW-function context; a device (struct ecore_dev) holds up to
+ * MAX_HWFNS_PER_DEVICE of these.
+ */
+struct ecore_hwfn {
+       struct ecore_dev *p_dev;
+       u8 my_id;               /* ID inside the PF */
+/* NOTE(review): the argument is a hwfn (my_id is a hwfn field), despite
+ * the 'edev' parameter name.
+ */
+#define IS_LEAD_HWFN(edev)             (!((edev)->my_id))
+       u8 rel_pf_id;           /* Relative to engine */
+       u8 abs_pf_id;
+#define ECORE_PATH_ID(_p_hwfn) \
+               (ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
+       u8 port_id;
+       bool b_active;
+
+       /* Debug-print configuration */
+       u32 dp_module;
+       u8 dp_level;
+       char name[NAME_SIZE];
+       void *dp_ctx;
+
+       bool first_on_engine;
+       bool hw_init_done;
+
+       u8 num_funcs_on_engine;
+
+       /* BAR access */
+       void OSAL_IOMEM *regview;
+       void OSAL_IOMEM *doorbells;
+       u64 db_phys_addr;
+       unsigned long db_size;
+
+       /* PTT pool */
+       struct ecore_ptt_pool *p_ptt_pool;
+
+       /* HW info */
+       struct ecore_hw_info hw_info;
+
+       /* rt_array (for init-tool) */
+       struct ecore_rt_data rt_data;
+
+       /* SPQ */
+       struct ecore_spq *p_spq;
+
+       /* EQ */
+       struct ecore_eq *p_eq;
+
+       /* Consolidation Queue */
+       struct ecore_consq *p_consq;
+
+       /* Slow-Path definitions */
+       osal_dpc_t sp_dpc;
+       bool b_sp_dpc_enabled;
+
+       struct ecore_ptt *p_main_ptt;
+       struct ecore_ptt *p_dpc_ptt;
+
+       struct ecore_sb_sp_info *p_sp_sb;
+       struct ecore_sb_attn_info *p_sb_attn;
+
+       /* Protocol related */
+       struct ecore_ooo_info *p_ooo_info;
+       struct ecore_pf_params pf_params;
+
+       /* Array of sb_info of all status blocks */
+       struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+       u16 num_sbs;
+
+       struct ecore_cxt_mngr *p_cxt_mngr;
+
+       /* Flag indicating whether interrupts are enabled or not */
+       bool b_int_enabled;
+       bool b_int_requested;
+
+       /* True if the driver requests for the link */
+       bool b_drv_link_init;
+
+       struct ecore_vf_iov *vf_iov_info;
+       struct ecore_pf_iov *pf_iov_info;
+       struct ecore_mcp_info *mcp_info;
+
+       /* Per-queue CID bookkeeping (Tx / Rx) */
+       struct ecore_hw_cid_data *p_tx_cids;
+       struct ecore_hw_cid_data *p_rx_cids;
+
+       struct ecore_dmae_info dmae_info;
+
+       /* QM init */
+       struct ecore_qm_info qm_info;
+
+       /* Buffer for unzipping firmware data */
+#ifdef CONFIG_ECORE_ZIPPED_FW
+       void *unzip_buf;
+#endif
+
+       struct dbg_tools_data dbg_info;
+
+       struct z_stream_s *stream;
+
+       /* PWM region specific data */
+       u32 dpi_size;
+       u32 dpi_count;
+       u32 dpi_start_offset;   /* this is used to
+                                * calculate the
+                                * doorbell address
+                                */
+};
+
+#ifndef __EXTRACT__LINUX__
+/* Multi-function operating modes */
+enum ecore_mf_mode {
+       ECORE_MF_DEFAULT,       /* single function */
+       ECORE_MF_OVLAN,         /* switch-dependent; see IS_MF_SD() */
+       ECORE_MF_NPAR,          /* switch-independent; see IS_MF_SI() */
+};
+#endif
+
+/* Device-wide state, shared by all HW-functions of the adapter */
+struct ecore_dev {
+       /* Debug-print configuration */
+       u32 dp_module;
+       u8 dp_level;
+       char name[NAME_SIZE];
+       void *dp_ctx;
+
+       u8 type;
+#define ECORE_DEV_TYPE_BB      (0 << 0)
+#define ECORE_DEV_TYPE_AH      (1 << 0)
+/* Translate type/revision combo into the proper conditions */
+#define ECORE_IS_BB(dev)       ((dev)->type == ECORE_DEV_TYPE_BB)
+#define ECORE_IS_BB_A0(dev)    (ECORE_IS_BB(dev) && \
+                                CHIP_REV_IS_A0(dev))
+#define ECORE_IS_BB_B0(dev)    (ECORE_IS_BB(dev) && \
+                                CHIP_REV_IS_B0(dev))
+#define ECORE_IS_AH(dev)       ((dev)->type == ECORE_DEV_TYPE_AH)
+#define ECORE_IS_K2(dev)       ECORE_IS_AH(dev)
+#define ECORE_GET_TYPE(dev)    (ECORE_IS_BB_A0(dev) ? CHIP_BB_A0 : \
+                                ECORE_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
+
+       u16 vendor_id;
+       u16 device_id;
+
+       u16 chip_num;
+#define CHIP_NUM_MASK                  0xffff
+#define CHIP_NUM_SHIFT                 16
+
+       u16 chip_rev;
+#define CHIP_REV_MASK                  0xf
+#define CHIP_REV_SHIFT                 12
+#ifndef ASIC_ONLY
+/* Non-ASIC (emulation / FPGA) platforms are encoded in chip_rev */
+#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
+#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
+#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
+#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
+                                         CHIP_REV_IS_EMUL_B0(_p_dev))
+#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
+#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
+#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
+                                         CHIP_REV_IS_FPGA_B0(_p_dev))
+#define CHIP_REV_IS_SLOW(_p_dev) \
+               (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
+#define CHIP_REV_IS_A0(_p_dev) \
+               (CHIP_REV_IS_EMUL_A0(_p_dev) || \
+                CHIP_REV_IS_FPGA_A0(_p_dev) || \
+                !(_p_dev)->chip_rev)
+#define CHIP_REV_IS_B0(_p_dev) \
+               (CHIP_REV_IS_EMUL_B0(_p_dev) || \
+                CHIP_REV_IS_FPGA_B0(_p_dev) || \
+                (_p_dev)->chip_rev == 1)
+#define CHIP_REV_IS_ASIC(_p_dev) (!CHIP_REV_IS_SLOW(_p_dev))
+#else
+#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
+#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
+#endif
+
+       u16 chip_metal;
+#define CHIP_METAL_MASK                        0xff
+#define CHIP_METAL_SHIFT               4
+
+       u16 chip_bond_id;
+#define CHIP_BOND_ID_MASK              0xf
+#define CHIP_BOND_ID_SHIFT             0
+
+       u8 num_engines;
+       u8 num_ports_in_engines;
+       u8 num_funcs_in_port;
+
+       u8 path_id;
+       enum ecore_mf_mode mf_mode;
+#define IS_MF_DEFAULT(_p_hwfn) \
+               (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn)      (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+#define IS_MF_SD(_p_hwfn)      (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+
+       int pcie_width;
+       int pcie_speed;
+       u8 ver_str[VER_SIZE];
+       /* Add MF related configuration */
+       u8 mcp_rev;
+       u8 boot_mode;
+
+       u8 wol;
+
+       u32 int_mode;
+       enum ecore_coalescing_mode int_coalescing_mode;
+       u8 rx_coalesce_usecs;
+       u8 tx_coalesce_usecs;
+
+       /* Start Bar offset of first hwfn */
+       void OSAL_IOMEM *regview;
+       void OSAL_IOMEM *doorbells;
+       u64 db_phys_addr;
+       unsigned long db_size;
+
+       /* PCI */
+       u8 cache_shift;
+
+       /* Init */
+       const struct iro *iro_arr;
+#define IRO (p_hwfn->p_dev->iro_arr)
+
+       /* HW functions */
+       u8 num_hwfns;
+       struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+
+       /* SRIOV */
+       struct ecore_hw_sriov_info sriov_info;
+       unsigned long tunn_mode;
+#define IS_ECORE_SRIOV(edev)           (!!((edev)->sriov_info.total_vfs))
+       bool b_is_vf;
+
+       u32 drv_type;
+
+       struct ecore_eth_stats *reset_stats;
+       struct ecore_fw_data *fw_data;
+
+       u32 mcp_nvm_resp;
+
+       /* Recovery */
+       bool recov_in_prog;
+
+#ifndef ASIC_ONLY
+       bool b_is_emul_full;
+#endif
+
+       /* Raw firmware image and its length */
+       void *firmware;
+
+       u64 fw_len;
+
+};
+
+/* Chip-variant dependent limits: BB vs. K2 (AH) */
+#define NUM_OF_VFS(dev)                (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
+                                                 : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev)  (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+                                                 : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev)      (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+                                                 : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev)                (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+                                                 : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev)    (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
+                                                 : MAX_NUM_PFS_K2)
+
+/* Workaround is required only on BB A0, engine path 1, in the
+ * dual-lane (2xN) port modes.
+ */
+#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
+       (ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
+       (ECORE_PATH_ID(p_hwfn) == 1) && \
+       ((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
+        (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
+        (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
+
+/**
+ * @brief ecore_concrete_to_sw_fid - get the sw function id from
+ *        the concrete value.
+ *
+ * @param concrete_fid
+ *
+ * @return u8 - the pfid for a PF, or MAX_NUM_PFS + vfid for a VF
+ */
+static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
+                                              u32 concrete_fid)
+{
+       u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+       u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
+
+       /* VF sw-fids are mapped past the PF range */
+       if (GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID))
+               return vfid + MAX_NUM_PFS;
+
+       return pfid;
+}
+
+/* Traffic classes reserved for pure-LB and OOO traffic respectively */
+#define PURE_LB_TC 8
+#define OOO_LB_TC 9
+
+/* Return the relative id of the first active VF >= rel_vf_id, or
+ * total_vfs when no further active VF exists.
+ */
+static OSAL_INLINE u16 ecore_sriov_get_next_vf(struct ecore_hwfn *p_hwfn,
+                                              u16 rel_vf_id)
+{
+       u16 total_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
+       u16 vf_id = rel_vf_id;
+
+       while (vf_id < total_vfs &&
+              !ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, vf_id))
+               vf_id++;
+
+       return vf_id;
+}
+
+/* WFQ / bandwidth configuration entry points (implemented in ecore_dev.c) */
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+                                          u32 min_pf_rate);
+
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+int ecore_device_num_engines(struct ecore_dev *p_dev);
+int ecore_device_num_ports(struct ecore_dev *p_dev);
+
+/* Iterate _i over the relative ids of all active VFs of _p_hwfn */
+#define ecore_for_each_vf(_p_hwfn, _i)                         \
+       for (_i = ecore_sriov_get_next_vf(_p_hwfn, 0);          \
+            _i < _p_hwfn->p_dev->sriov_info.total_vfs;         \
+            _i = ecore_sriov_get_next_vf(_p_hwfn, _i + 1))
+
+#define ECORE_LEADING_HWFN(dev)        (&dev->hwfns[0])
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
new file mode 100644 (file)
index 0000000..98bbffc
--- /dev/null
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_CHAIN_H__
+#define __ECORE_CHAIN_H__
+
+#include <assert.h>            /* @DPDK */
+
+#include "common_hsi.h"
+#include "ecore_utils.h"
+
+/* How the pages backing a chain are linked together */
+enum ecore_chain_mode {
+       /* Each Page contains a next pointer at its end */
+       ECORE_CHAIN_MODE_NEXT_PTR,
+
+       /* Chain is a single page; a next pointer is not required */
+       ECORE_CHAIN_MODE_SINGLE,
+
+       /* Page pointers are located in a side list */
+       ECORE_CHAIN_MODE_PBL,
+};
+
+/* Direction in which the driver uses the chain */
+enum ecore_chain_use_mode {
+       ECORE_CHAIN_USE_TO_PRODUCE,     /* Chain starts empty */
+       ECORE_CHAIN_USE_TO_CONSUME,     /* Chain starts full */
+       ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,     /* Chain starts empty */
+};
+
+/* Width of the chain's size/prod/cons counters */
+enum ecore_chain_cnt_type {
+       /* The chain's size/prod/cons are kept in 16-bit variables */
+       ECORE_CHAIN_CNT_TYPE_U16,
+
+       /* The chain's size/prod/cons are kept in 32-bit variables  */
+       ECORE_CHAIN_CNT_TYPE_U32,
+};
+
+/* The next-pointer element placed at the end of every page in
+ * ECORE_CHAIN_MODE_NEXT_PTR chains.
+ */
+struct ecore_chain_next {
+       struct regpair next_phys;
+       void *next_virt;
+};
+
+/* Current PBL page indices for a 16-bit-counter chain */
+struct ecore_chain_pbl_u16 {
+       u16 prod_page_idx;
+       u16 cons_page_idx;
+};
+
+/* Current PBL page indices for a 32-bit-counter chain */
+struct ecore_chain_pbl_u32 {
+       u32 prod_page_idx;
+       u32 cons_page_idx;
+};
+
+struct ecore_chain_pbl {
+       /* Base address of a pre-allocated buffer for pbl */
+       dma_addr_t p_phys_table;
+       void *p_virt_table;
+
+       /* Table for keeping the virtual addresses of the chain pages,
+        * respectively to the physical addresses in the pbl table.
+        */
+       void **pp_virt_addr_tbl;
+
+       /* Index to current used page by producer/consumer */
+       union {
+               struct ecore_chain_pbl_u16 pbl16;
+               struct ecore_chain_pbl_u32 pbl32;
+       } u;
+};
+
+struct ecore_chain_u16 {
+       /* Cyclic index of next element to produce/consume */
+       u16 prod_idx;
+       u16 cons_idx;
+};
+
+struct ecore_chain_u32 {
+       /* Cyclic index of next element to produce/consume */
+       u32 prod_idx;
+       u32 cons_idx;
+};
+
+/* A cyclic producer/consumer element ring spanning one or more pages */
+struct ecore_chain {
+       /* Address of first page of the chain */
+       void *p_virt_addr;
+       dma_addr_t p_phys_addr;
+
+       /* Point to next element to produce/consume */
+       void *p_prod_elem;
+       void *p_cons_elem;
+
+       enum ecore_chain_mode mode;
+       enum ecore_chain_use_mode intended_use;
+
+       /* Selects which member of 'u' (and of pbl.u) is valid */
+       enum ecore_chain_cnt_type cnt_type;
+       union {
+               struct ecore_chain_u16 chain16;
+               struct ecore_chain_u32 chain32;
+       } u;
+
+       u32 page_cnt;
+
+       /* Number of elements - capacity is for usable elements only,
+        * while size will contain total number of elements [for entire chain].
+        */
+       u32 capacity;
+       u32 size;
+
+       /* Elements information for fast calculations */
+       u16 elem_per_page;
+       u16 elem_per_page_mask;
+       u16 elem_unusable;      /* elements lost per page to the next-ptr;
+                                * see UNUSABLE_ELEMS_PER_PAGE()
+                                */
+       u16 usable_per_page;
+       u16 elem_size;
+       u16 next_page_mask;
+
+       struct ecore_chain_pbl pbl;
+};
+
+#define ECORE_CHAIN_PBL_ENTRY_SIZE     (8)
+#define ECORE_CHAIN_PAGE_SIZE          (0x1000)
+#define ELEMS_PER_PAGE(elem_size)      (ECORE_CHAIN_PAGE_SIZE / (elem_size))
+
+/* In NEXT_PTR mode, ceil(sizeof(struct ecore_chain_next) / elem_size)
+ * elements at the end of each page are sacrificed for the next pointer;
+ * other modes lose none.
+ */
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)               \
+         ((mode == ECORE_CHAIN_MODE_NEXT_PTR) ?                \
+          (1 + ((sizeof(struct ecore_chain_next) - 1) /                \
+          (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode)                 \
+       ((u32)(ELEMS_PER_PAGE(elem_size) -                      \
+       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode)                \
+       DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+
+#define is_chain_u16(p)        ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p)        ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)
+
+/* Accessors - each asserts that the chain's counter width matches the
+ * accessor's variant before reading the cyclic index.
+ */
+static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
+{
+       OSAL_ASSERT(is_chain_u16(p_chain));
+       return p_chain->u.chain16.prod_idx;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
+{
+       OSAL_ASSERT(is_chain_u32(p_chain));
+       return p_chain->u.chain32.prod_idx;
+}
+
+static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
+{
+       OSAL_ASSERT(is_chain_u16(p_chain));
+       return p_chain->u.chain16.cons_idx;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
+{
+       OSAL_ASSERT(is_chain_u32(p_chain));
+       return p_chain->u.chain32.cons_idx;
+}
+
+/* FIXME:
+ * Should create OSALs for the below definitions.
+ * For Linux, replace them with the existing U16_MAX and U32_MAX, and handle
+ * kernel versions that lack them.
+ */
+#define ECORE_U16_MAX  ((u16)~0U)
+#define ECORE_U32_MAX  ((u32)~0U)
+
+/* Number of free (producible) elements in a 16-bit-counter chain */
+static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
+{
+       u16 used;
+
+       OSAL_ASSERT(is_chain_u16(p_chain));
+
+       /* prod - cons, computed in 32 bits so that a wrapped prod index
+        * still yields the correct 16-bit difference.
+        */
+       used = (u16)(((u32)ECORE_U16_MAX + 1 +
+                     (u32)(p_chain->u.chain16.prod_idx)) -
+                    (u32)p_chain->u.chain16.cons_idx);
+       /* In NEXT_PTR mode the raw index delta also counts the per-page
+        * next-pointer elements; discount the page boundaries crossed.
+        */
+       if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+               used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
+                   p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+
+       return (u16)(p_chain->capacity - used);
+}
+
+/* Number of free (producible) elements in a 32-bit-counter chain;
+ * identical logic to ecore_chain_get_elem_left() with 64-bit intermediates.
+ */
+static OSAL_INLINE u32
+ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
+{
+       u32 used;
+
+       OSAL_ASSERT(is_chain_u32(p_chain));
+
+       used = (u32)(((u64)ECORE_U32_MAX + 1 +
+                      (u64)(p_chain->u.chain32.prod_idx)) -
+                     (u64)p_chain->u.chain32.cons_idx);
+       if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+               used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
+                   p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+
+       return p_chain->capacity - used;
+}
+
+/* Non-zero when the element-left count equals the chain's capacity */
+static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
+{
+       u32 elem_left;
+
+       /* Pick the accessor matching the chain's counter width */
+       elem_left = is_chain_u16(p_chain) ?
+           ecore_chain_get_elem_left(p_chain) :
+           ecore_chain_get_elem_left_u32(p_chain);
+
+       return elem_left == p_chain->capacity;
+}
+
+/* Non-zero when the element-left count is zero */
+static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain)
+{
+       u32 elem_left;
+
+       /* Pick the accessor matching the chain's counter width */
+       elem_left = is_chain_u16(p_chain) ?
+           ecore_chain_get_elem_left(p_chain) :
+           ecore_chain_get_elem_left_u32(p_chain);
+
+       return elem_left == 0;
+}
+
+/* Trivial field getters */
+static OSAL_INLINE
+u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
+{
+       return p_chain->elem_per_page;
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
+{
+       return p_chain->usable_per_page;
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
+{
+       return p_chain->elem_unusable;
+}
+
+/* Total number of elements, including the unusable ones */
+static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
+{
+       return p_chain->size;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
+{
+       return p_chain->page_cnt;
+}
+
+/**
+ * @brief ecore_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem  in/out pointer to the current element
+ * @param idx_to_inc   points to a u16 or u32 index, per the chain cnt_type
+ * @param page_to_inc  points to a u16 or u32 PBL page index (PBL mode only)
+ */
+static OSAL_INLINE void
+ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
+                        void *idx_to_inc, void *page_to_inc)
+{
+       struct ecore_chain_next *p_next = OSAL_NULL;
+       u32 page_index = 0;
+
+       switch (p_chain->mode) {
+       case ECORE_CHAIN_MODE_NEXT_PTR:
+               /* Follow the embedded next pointer and skip over the
+                * unusable elements at the end of the page.
+                */
+               p_next = (struct ecore_chain_next *)(*p_next_elem);
+               *p_next_elem = p_next->next_virt;
+               if (is_chain_u16(p_chain))
+                       *(u16 *)idx_to_inc += p_chain->elem_unusable;
+               else
+                       *(u32 *)idx_to_inc += p_chain->elem_unusable;
+               break;
+       case ECORE_CHAIN_MODE_SINGLE:
+               /* Single page - simply wrap to the start */
+               *p_next_elem = p_chain->p_virt_addr;
+               break;
+       case ECORE_CHAIN_MODE_PBL:
+               /* Advance the page index cyclically and look the page's
+                * virtual address up in the side table.
+                */
+               if (is_chain_u16(p_chain)) {
+                       if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
+                               *(u16 *)page_to_inc = 0;
+                       page_index = *(u16 *)page_to_inc;
+               } else {
+                       if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
+                               *(u32 *)page_to_inc = 0;
+                       page_index = *(u32 *)page_to_inc;
+               }
+               *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+       }
+}
+
+/* NOTE: in all of the macros below, 'idx' is a field-name token
+ * (prod_idx or cons_idx) pasted into the chain16/chain32 access -
+ * it is not an ordinary value parameter.
+ */
+#define is_unusable_idx(p, idx)                        \
+       (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_idx_u32(p, idx)            \
+       (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx)           \
+       ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
+       (p)->usable_per_page)
+
+#define is_unusable_next_idx_u32(p, idx)       \
+       ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) \
+       == (p)->usable_per_page)
+
+/* If 'idx' landed on a page's unusable tail, jump it past those elements */
+#define test_and_skip(p, idx)                                          \
+       do {                                                            \
+               if (is_chain_u16(p)) {                                  \
+                       if (is_unusable_idx(p, idx))                    \
+                               (p)->u.chain16.idx += (p)->elem_unusable; \
+               } else {                                                \
+                       if (is_unusable_idx_u32(p, idx))                \
+                               (p)->u.chain32.idx += (p)->elem_unusable; \
+               }                                                       \
+       } while (0)
+
+/**
+ * @brief ecore_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previous produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num  number of elements to return
+ */
+static OSAL_INLINE
+void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
+{
+       if (is_chain_u16(p_chain))
+               p_chain->u.chain16.cons_idx += (u16)num;
+       else
+               p_chain->u.chain32.cons_idx += num;
+       /* Skip the unusable page-tail elements if we landed on them */
+       test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief ecore_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previous produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
+{
+       /* Single-element form of ecore_chain_return_multi_produced() */
+       ecore_chain_return_multi_produced(p_chain, 1);
+}
+
+/**
+ * @brief ecore_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It's driver
+ * responsibility to validate that the chain has room for new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to next element
+ */
+static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
+{
+       void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx;
+
+       if (is_chain_u16(p_chain)) {
+               /* If the producer sits on the last usable element of the
+                * page, advance to the next page before producing.
+                */
+               if ((p_chain->u.chain16.prod_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_prod_idx = &p_chain->u.chain16.prod_idx;
+                       p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+                       ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+                                                p_prod_idx, p_prod_page_idx);
+               }
+               p_chain->u.chain16.prod_idx++;
+       } else {
+               if ((p_chain->u.chain32.prod_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_prod_idx = &p_chain->u.chain32.prod_idx;
+                       p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+                       ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+                                                p_prod_idx, p_prod_page_idx);
+               }
+               p_chain->u.chain32.prod_idx++;
+       }
+
+       /* Return the current element and step past it */
+       p_ret = p_chain->p_prod_elem;
+       p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+                                       p_chain->elem_size);
+
+       return p_ret;
+}
+
+/**
+ * @brief ecore_chain_get_capacity -
+ *
+ * Get the maximum number of BDs in chain
+ *
+ * @param p_chain
+ *
+ * @return number of usable BDs the chain can hold (its capacity)
+ */
+static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain)
+{
+       return p_chain->capacity;
+}
+
+/**
+ * @brief ecore_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments producers so they could be written to FW.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE
+void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
+{
+       /* Skip the page-tail elements before incrementing, so prod_idx
+        * lands on a usable element.
+        */
+       test_and_skip(p_chain, prod_idx);
+       if (is_chain_u16(p_chain))
+               p_chain->u.chain16.prod_idx++;
+       else
+               p_chain->u.chain32.prod_idx++;
+}
+
+/**
+ * @brief ecore_chain_consume -
+ *
+ * A Chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
+{
+       void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx;
+
+       if (is_chain_u16(p_chain)) {
+               /* If the consumer sits on the last usable element of the
+                * page, advance to the next page before consuming.
+                */
+               if ((p_chain->u.chain16.cons_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_cons_idx = &p_chain->u.chain16.cons_idx;
+                       p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+                       ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+                                                p_cons_idx, p_cons_page_idx);
+               }
+               p_chain->u.chain16.cons_idx++;
+       } else {
+               if ((p_chain->u.chain32.cons_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_cons_idx = &p_chain->u.chain32.cons_idx;
+                       p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
+                       ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+                                                p_cons_idx, p_cons_page_idx);
+               }
+               p_chain->u.chain32.cons_idx++;
+       }
+
+       /* Return the current element and step past it */
+       p_ret = p_chain->p_cons_elem;
+       p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+                                       p_chain->elem_size);
+
+       return p_ret;
+}
+
+/**
+ * @brief ecore_chain_reset -
+ *
+ * Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
+{
+       u32 i;
+
+       if (is_chain_u16(p_chain)) {
+               p_chain->u.chain16.prod_idx = 0;
+               p_chain->u.chain16.cons_idx = 0;
+       } else {
+               p_chain->u.chain32.prod_idx = 0;
+               p_chain->u.chain32.cons_idx = 0;
+       }
+       p_chain->p_cons_elem = p_chain->p_virt_addr;
+       p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+       if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+               /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+                * indices, to avoid unnecessary page advancing on the first
+                * call to ecore_chain_produce/consume. Instead, the indices
+                * will be advanced to page_cnt and then will be wrapped to 0.
+                */
+               u32 reset_val = p_chain->page_cnt - 1;
+
+               if (is_chain_u16(p_chain)) {
+                       p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
+                       p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+               } else {
+                       p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
+                       p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+               }
+       }
+
+       switch (p_chain->intended_use) {
+       case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
+       case ECORE_CHAIN_USE_TO_PRODUCE:
+               /* Do nothing */
+               break;
+
+       case ECORE_CHAIN_USE_TO_CONSUME:
+               /* produce empty elements so the chain starts full */
+               for (i = 0; i < p_chain->capacity; i++)
+                       ecore_chain_recycle_consumed(p_chain);
+               break;
+       }
+}
+
+/**
+ * @brief ecore_chain_init_params -
+ *
+ * Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param page_cnt     number of pages in the allocated buffer
+ * @param elem_size    size of each element in the chain
+ * @param intended_use
+ * @param mode
+ * @param cnt_type
+ */
+static OSAL_INLINE void
+ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
+                       enum ecore_chain_use_mode intended_use,
+                       enum ecore_chain_mode mode,
+                       enum ecore_chain_cnt_type cnt_type)
+{
+       /* chain fixed parameters; buffer addresses are attached later via
+        * ecore_chain_init_mem()/ecore_chain_init_pbl_mem()
+        */
+       p_chain->p_virt_addr = OSAL_NULL;
+       p_chain->p_phys_addr = 0;
+       p_chain->elem_size = elem_size;
+       p_chain->intended_use = intended_use;
+       p_chain->mode = mode;
+       p_chain->cnt_type = cnt_type;
+
+       /* Per-page geometry; the mask math presumes elem_per_page is a
+        * power of two (see ELEMS_PER_PAGE) - TODO confirm.
+        */
+       p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+       p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
+       p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+       p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+       p_chain->next_page_mask = (p_chain->usable_per_page &
+                                  p_chain->elem_per_page_mask);
+
+       /* capacity counts only usable elements; size counts all of them */
+       p_chain->page_cnt = page_cnt;
+       p_chain->capacity = p_chain->usable_per_page * page_cnt;
+       p_chain->size = p_chain->elem_per_page * page_cnt;
+
+       p_chain->pbl.p_phys_table = 0;
+       p_chain->pbl.p_virt_table = OSAL_NULL;
+       p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
+}
+
+/**
+ * @brief ecore_chain_init_mem -
+ *
+ * Initializes a basic chain struct with its chain buffers
+ *
+ * @param p_chain
+ * @param p_virt_addr  virtual address of allocated buffer's beginning
+ * @param p_phys_addr  physical address of allocated buffer's beginning
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
+                                            void *p_virt_addr,
+                                            dma_addr_t p_phys_addr)
+{
+       /* Record both mappings of the chain buffer's first byte */
+       p_chain->p_phys_addr = p_phys_addr;
+       p_chain->p_virt_addr = p_virt_addr;
+}
+
+/**
+ * @brief ecore_chain_init_pbl_mem -
+ *
+ * Initializes a basic chain struct with its pbl buffers
+ *
+ * @param p_chain
+ * @param p_virt_pbl   pointer to a pre allocated side table which will hold
+ *                      virtual page addresses.
+ * @param p_phys_pbl   pointer to a pre-allocated side table which will hold
+ *                      physical page addresses.
+ * @param pp_virt_addr_tbl
+ *                      pointer to a pre-allocated side table which will hold
+ *                      the virtual addresses of the chain pages.
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
+                                                void *p_virt_pbl,
+                                                dma_addr_t p_phys_pbl,
+                                                void **pp_virt_addr_tbl)
+{
+       /* Attach the pre-allocated PBL side tables to the chain */
+       p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+       p_chain->pbl.p_virt_table = p_virt_pbl;
+       p_chain->pbl.p_phys_table = p_phys_pbl;
+}
+
+/**
+ * @brief ecore_chain_init_next_ptr_elem -
+ *
+ * Initializes a next pointer element
+ *
+ * @param p_chain
+ * @param p_virt_curr  virtual address of a chain page of which the next
+ *                      pointer element is initialized
+ * @param p_virt_next  virtual address of the next chain page
+ * @param p_phys_next  physical address of the next chain page
+ *
+ */
+static OSAL_INLINE void
+ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
+                              void *p_virt_next, dma_addr_t p_phys_next)
+{
+       struct ecore_chain_next *p_next;
+       u32 size;
+
+       /* The next-pointer element sits immediately after the page's
+        * usable elements.
+        */
+       size = p_chain->elem_size * p_chain->usable_per_page;
+       p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size);
+
+       /* Store the next page's physical address as an LE regpair */
+       DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
+
+       p_next->next_virt = p_virt_next;
+}
+
+/**
+ * @brief ecore_chain_get_last_elem -
+ *
+ * Returns a pointer to the last element of the chain
+ *
+ * @param p_chain
+ *
+ * @return void*
+ */
+static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
+{
+       struct ecore_chain_next *p_next = OSAL_NULL;
+       void *p_virt_addr = OSAL_NULL;
+       u32 size, last_page_idx;
+
+       /* No chain memory attached yet - nothing to return */
+       if (!p_chain->p_virt_addr)
+               goto out;
+
+       switch (p_chain->mode) {
+       case ECORE_CHAIN_MODE_NEXT_PTR:
+               /* Walk the next-pointer links until they wrap back to the
+                * first page.
+                */
+               size = p_chain->elem_size * p_chain->usable_per_page;
+               p_virt_addr = p_chain->p_virt_addr;
+               p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size);
+               while (p_next->next_virt != p_chain->p_virt_addr) {
+                       p_virt_addr = p_next->next_virt;
+                       p_next =
+                           (struct ecore_chain_next *)((u8 *)p_virt_addr +
+                                                       size);
+               }
+               break;
+       case ECORE_CHAIN_MODE_SINGLE:
+               p_virt_addr = p_chain->p_virt_addr;
+               break;
+       case ECORE_CHAIN_MODE_PBL:
+               /* Last page is looked up directly in the side table */
+               last_page_idx = p_chain->page_cnt - 1;
+               p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+               break;
+       }
+       /* p_virt_addr points at this stage to the last page of the chain */
+       size = p_chain->elem_size * (p_chain->usable_per_page - 1);
+       p_virt_addr = ((u8 *)p_virt_addr + size);
+out:
+       return p_virt_addr;
+}
+
+/**
+ * @brief ecore_chain_set_prod - sets the prod to the given value
+ *
+ * @param p_chain
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
+                                            u32 prod_idx, void *p_prod_elem)
+{
+       /* Store the index in whichever counter width this chain uses */
+       if (!is_chain_u16(p_chain))
+               p_chain->u.chain32.prod_idx = prod_idx;
+       else
+               p_chain->u.chain16.prod_idx = (u16)prod_idx;
+
+       p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief ecore_chain_pbl_zero_mem - set chain memory to 0
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
+{
+       u32 page, num_pages;
+
+       /* Only PBL chains keep a per-page virtual address table */
+       if (p_chain->mode != ECORE_CHAIN_MODE_PBL)
+               return;
+
+       num_pages = ecore_chain_get_page_cnt(p_chain);
+
+       for (page = 0; page < num_pages; page++)
+               OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[page],
+                             ECORE_CHAIN_PAGE_SIZE);
+}
+
+#endif /* __ECORE_CHAIN_H__ */
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
new file mode 100644 (file)
index 0000000..8436621
--- /dev/null
@@ -0,0 +1,1961 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_rt_defs.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_init_ops.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_hw.h"
+#include "ecore_dev_api.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES         PROTOCOLID_COMMON
+#define NUM_TASK_TYPES         2
+#define NUM_TASK_PF_SEGMENTS   4
+#define NUM_TASK_VF_SEGMENTS   1
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT)
+
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT       7
+#define TM_ALIGN       (1 << TM_SHIFT)
+#define TM_ELEM_SIZE   4
+
+/* ILT constants */
+/* If for some reason, HW P size is modified to be less than 32K,
+ * special handling needs to be made for CDU initialization
+ */
+#define ILT_DEFAULT_HW_P_SIZE  3
+
+/* ILT page size in bytes: 4KB << hw_p_size */
+#define ILT_PAGE_IN_BYTES(hw_p_size)   (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg)          PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK                0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT       0
+#define ILT_ENTRY_VALID_MASK           0x1ULL
+#define ILT_ENTRY_VALID_SHIFT          52
+#define ILT_ENTRY_IN_REGS              2
+#define ILT_REG_SIZE_IN_BYTES          4
+
+/* connection context union */
+union conn_context {
+       struct core_conn_context core_ctx;
+       struct eth_conn_context eth_ctx;
+};
+
+/* Searcher T2 table entry: opaque payload plus a next-entry link */
+struct src_ent {
+       u8 opaque[56];
+       u64 next;
+};
+
+#define CDUT_SEG_ALIGNMET 3    /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+
+/* per-hwfn aligned size of union conn_context (see ALIGNED_TYPE_SIZE) */
+#define CONN_CXT_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+/* PF per protocol configuration object */
+#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+/* Per-segment task (tid) configuration */
+struct ecore_tid_seg {
+       u32 count;              /* number of tids in this segment */
+       u8 type;                /* task type; indexes task_type_size[] */
+       bool has_fl_mem;        /* segment has forced-load (init) memory */
+};
+
+/* Per connection-type (protocol) cid/tid accounting */
+struct ecore_conn_type_cfg {
+       u32 cid_count;
+       u32 cid_start;
+       u32 cids_per_vf;
+       struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration,
+ * Per connection type (protocol) resources (cids, tis, vf cids etc.)
+ * 1 - for connection context (CDUC) and for each task context we need two
+ * values, for regular task context and for force load memory
+ */
+#define ILT_CLI_PF_BLOCKS      (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS      (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK               (0)
+#define CDUT_SEG_BLK(n)                (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X)  (1 + (n) + NUM_TASK_##X##_SEGMENTS)
+
+/* HW blocks that are clients of the ILT */
+enum ilt_clients {
+       ILT_CLI_CDUC,
+       ILT_CLI_CDUT,
+       ILT_CLI_QM,
+       ILT_CLI_TM,
+       ILT_CLI_SRC,
+       ILT_CLI_MAX
+};
+
+/* runtime-register offset and the value to program into it */
+struct ilt_cfg_pair {
+       u32 reg;
+       u32 val;
+};
+
+/* One contiguous span of ILT lines belonging to a client */
+struct ecore_ilt_cli_blk {
+       u32 total_size;         /* 0 means not active */
+       u32 real_size_in_page;
+       u32 start_line;
+       u32 dynamic_line_cnt;
+};
+
+struct ecore_ilt_client_cfg {
+       bool active;
+
+       /* ILT boundaries */
+       struct ilt_cfg_pair first;
+       struct ilt_cfg_pair last;
+       struct ilt_cfg_pair p_size;
+
+       /* ILT client blocks for PF */
+       struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+       u32 pf_total_lines;
+
+       /* ILT client blocks for VFs */
+       struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+       u32 vf_total_lines;
+};
+
+/* Per Path -
+ *      ILT shadow table
+ *      Protocol acquired CID lists
+ *      PF start line in ILT
+ */
+struct ecore_dma_mem {
+       dma_addr_t p_phys;
+       void *p_virt;
+       osal_size_t size;
+};
+
+/* word-size helpers for sizing the cid_map array below */
+#define MAP_WORD_SIZE          sizeof(unsigned long)
+#define BITS_PER_MAP_WORD      (MAP_WORD_SIZE * 8)
+
+/* cid_map is presumably a one-bit-per-cid allocation bitmap; the
+ * acquire/release code is outside this chunk - verify there.
+ */
+struct ecore_cid_acquired_map {
+       u32 start_cid;
+       u32 max_count;
+       unsigned long *cid_map;
+};
+
+/* Context manager - per-hwfn ILT and CID bookkeeping */
+struct ecore_cxt_mngr {
+       /* Per protocol configuration */
+       struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+       /* computed ILT structure */
+       struct ecore_ilt_client_cfg clients[ILT_CLI_MAX];
+
+       /* Task type sizes */
+       u32 task_type_size[NUM_TASK_TYPES];
+
+       /* total number of VFs for this hwfn -
+        * ALL VFs are symmetric in terms of HW resources
+        */
+       u32 vf_count;
+
+       /* Acquired CIDs */
+       struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+       /* ILT  shadow table */
+       struct ecore_dma_mem *ilt_shadow;
+       u32 pf_start_line;
+
+       /* SRC T2 */
+       struct ecore_dma_mem *t2;
+       u32 t2_num_pages;
+       u64 first_free;
+       u64 last_free;
+};
+
+/* check if resources/configuration is required according to protocol type */
+static OSAL_INLINE bool src_proto(enum protocol_type type)
+{
+       /* Only TOE connections consume Searcher (SRC) resources */
+       return type == PROTOCOLID_TOE;
+}
+
+static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
+{
+       /* Only TOE connections need Timers-block cid resources */
+       return type == PROTOCOLID_TOE;
+}
+
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct ecore_cdu_iids {
+       u32 pf_cids;            /* total PF connection ids */
+       u32 per_vf_cids;        /* connection ids needed by each VF */
+};
+
+static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
+                              struct ecore_cdu_iids *iids)
+{
+       u32 proto;
+
+       /* Accumulate PF and per-VF connection counts across every
+        * protocol; the caller zeroes the output struct beforehand.
+        */
+       for (proto = 0; proto < MAX_CONN_TYPES; proto++) {
+               struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[proto];
+
+               iids->pf_cids += p_cfg->cid_count;
+               iids->per_vf_cids += p_cfg->cids_per_vf;
+       }
+}
+
+/* counts the iids for the Searcher block configuration */
+struct ecore_src_iids {
+       u32 pf_cids;
+       u32 per_vf_cids;
+};
+
+static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+                                          struct ecore_src_iids *iids)
+{
+       u32 i;
+
+       /* Sum cids over only the protocols that use the Searcher */
+       for (i = 0; i < MAX_CONN_TYPES; i++) {
+               if (!src_proto(i))
+                       continue;
+
+               iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+               iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+       }
+}
+
+/* counts the iids for the Timers block configuration */
+struct ecore_tm_iids {
+       u32 pf_cids;
+       u32 pf_tids[NUM_TASK_PF_SEGMENTS];      /* per segment */
+       u32 pf_tids_total;
+       u32 per_vf_cids;
+       u32 per_vf_tids;
+};
+
+static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+                                         struct ecore_tm_iids *iids)
+{
+       u32 i, j;
+
+       for (i = 0; i < MAX_CONN_TYPES; i++) {
+               struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+               if (tm_cid_proto(i)) {
+                       iids->pf_cids += p_cfg->cid_count;
+                       iids->per_vf_cids += p_cfg->cids_per_vf;
+               }
+       }
+
+       /* TM entries are allocated in TM_ALIGN-sized chunks */
+       iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
+       iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
+       iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
+
+       /* NOTE(review): nothing in this function increments pf_tids[],
+        * so the loop below currently only propagates zeroes into
+        * pf_tids_total - presumably reserved for protocols not yet
+        * wired up here; confirm against later patches.
+        */
+       for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+               iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN);
+               iids->pf_tids_total += iids->pf_tids[j];
+       }
+}
+
+void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_tid_seg *segs;
+       u32 vf_cids = 0, type, j;
+       u32 vf_tids = 0;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               iids->cids += p_mngr->conn_cfg[type].cid_count;
+               vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+               segs = p_mngr->conn_cfg[type].tid_seg;
+               /* for each segment there is at most one
+                * protocol for which count is not 0.
+                */
+               for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+                       iids->tids += segs[j].count;
+
+               /* The last array element is for the VFs. As for PF
+                * segments there can be only one protocol for
+                * which this value is not 0.
+                */
+               vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+       }
+
+       /* Scale the per-VF counts by the number of VFs on this hwfn */
+       iids->vf_cids += vf_cids * p_mngr->vf_count;
+       iids->tids += vf_tids * p_mngr->vf_count;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+                  iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
+                                                   u32 seg)
+{
+       struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+       u32 proto;
+
+       /* At most one protocol has a non-zero tid count for a given
+        * segment (validated beforehand); return its descriptor, or
+        * OSAL_NULL when the segment is unused.
+        */
+       for (proto = 0; proto < MAX_CONN_TYPES; proto++) {
+               struct ecore_tid_seg *p_seg =
+                   &p_cfg->conn_cfg[proto].tid_seg[seg];
+
+               if (p_seg->count)
+                       return p_seg;
+       }
+
+       return OSAL_NULL;
+}
+
+/* set the iids (cid/tid) count per protocol */
+void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
+                                  enum protocol_type type,
+                                  u32 cid_count, u32 vf_cid_cnt)
+{
+       struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+       struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+       /* Round the requested counts up to the doorbell range unit */
+       p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN);
+       p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
+}
+
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+                                 enum protocol_type type, u32 *vf_cid)
+{
+       struct ecore_conn_type_cfg *p_conn =
+           &p_hwfn->p_cxt_mngr->conn_cfg[type];
+
+       /* Report the per-VF count too when the caller asks for it */
+       if (vf_cid != OSAL_NULL)
+               *vf_cid = p_conn->cids_per_vf;
+
+       return p_conn->cid_count;
+}
+
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+                                 enum protocol_type type)
+{
+       /* First cid assigned to this protocol on this hwfn */
+       return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+static u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
+                                        enum protocol_type type)
+{
+       struct ecore_conn_type_cfg *p_cfg =
+           &p_hwfn->p_cxt_mngr->conn_cfg[type];
+       u32 total = 0;
+       int seg;
+
+       /* Sum the tid counts over every PF and VF task segment */
+       for (seg = 0; seg < TASK_SEGMENTS; seg++)
+               total += p_cfg->tid_seg[seg].count;
+
+       return total;
+}
+
+static OSAL_INLINE void
+ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
+                             enum protocol_type proto,
+                             u8 seg, u8 seg_type, u32 count, bool has_fl)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+       /* Record the task-segment parameters for this protocol/segment */
+       p_seg->count = count;
+       p_seg->has_fl_mem = has_fl;
+       p_seg->type = seg_type;
+}
+
+/* the *p_line parameter must be either 0 for the first invocation or the
+ * value returned in the previous invocation.
+ */
+static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
+                                  struct ecore_ilt_cli_blk *p_blk,
+                                  u32 start_line,
+                                  u32 total_size, u32 elem_size)
+{
+       u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+       /* verify called once for each block */
+       if (p_blk->total_size)
+               return;
+
+       p_blk->total_size = total_size;
+       /* real_size_in_page is the ILT page size rounded down to a whole
+        * number of elements, so no element straddles a page boundary.
+        */
+       p_blk->real_size_in_page = 0;
+       if (elem_size)
+               p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+       p_blk->start_line = start_line;
+}
+
+static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ilt_client_cfg *p_cli,
+                                  struct ecore_ilt_cli_blk *p_blk,
+                                  u32 *p_line, enum ilt_clients client_id)
+{
+       /* Inactive blocks consume no ILT lines */
+       if (!p_blk->total_size)
+               return;
+
+       /* The first active block of a client fixes the client's first
+        * ILT line.
+        */
+       if (!p_cli->active)
+               p_cli->first.val = *p_line;
+
+       p_cli->active = true;
+       *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+       p_cli->last.val = *p_line - 1;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+                  client_id, p_cli->first.val, p_cli->last.val,
+                  p_blk->total_size, p_blk->real_size_in_page,
+                  p_blk->start_line);
+}
+
+static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
+                                         enum ilt_clients ilt_client)
+{
+       u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+       struct ecore_ilt_client_cfg *p_cli;
+       u32 lines_to_skip = 0;
+       u32 cxts_per_p;
+
+       /* TBD MK: ILT code should be simplified once PROTO enum is changed */
+
+       /* CDUC lines covering RoCE cids are counted here; presumably
+        * they are populated dynamically rather than at static shadow
+        * allocation - confirm against the ilt shadow alloc code.
+        */
+       if (ilt_client == ILT_CLI_CDUC) {
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+               cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+                   (u32)CONN_CXT_SIZE(p_hwfn);
+
+               lines_to_skip = cid_count / cxts_per_p;
+       }
+
+       return lines_to_skip;
+}
+
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 curr_line, total, i, task_size, line;
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_ilt_cli_blk *p_blk;
+       struct ecore_cdu_iids cdu_iids;
+       struct ecore_src_iids src_iids;
+       struct ecore_qm_iids qm_iids;
+       struct ecore_tm_iids tm_iids;
+       struct ecore_tid_seg *p_seg;
+
+       OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
+       OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
+       OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+       OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+
+       p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+                  p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+       /* CDUC */
+       p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+
+       curr_line = p_mngr->pf_start_line;
+
+       /* CDUC PF */
+       p_cli->pf_total_lines = 0;
+
+       /* get the counters for the CDUC client */
+       ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
+
+       p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+       total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+       ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                              total, CONN_CXT_SIZE(p_hwfn));
+
+       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+       p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+       p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
+                                                                ILT_CLI_CDUC);
+
+       /* CDUC VF */
+       p_blk = &p_cli->vf_blks[CDUC_BLK];
+       total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+       ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                              total, CONN_CXT_SIZE(p_hwfn));
+
+       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+       p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+       /* remaining VFs occupy identical line spans after the first */
+       for (i = 1; i < p_mngr->vf_count; i++)
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_CDUC);
+
+       /* CDUT PF */
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       p_cli->first.val = curr_line;
+
+       /* first the 'working' task memory */
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg || p_seg->count == 0)
+                       continue;
+
+               p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+               total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+                                      p_mngr->task_type_size[p_seg->type]);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_CDUT);
+       }
+
+       /* next the 'init' task memory (forced load memory) */
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg || p_seg->count == 0)
+                       continue;
+
+               p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+
+               if (!p_seg->has_fl_mem) {
+                       /* The segment is active (total size of 'working'
+                        * memory is > 0) but has no FL (forced-load, Init)
+                        * memory. Thus:
+                        *
+                        * 1.   The total-size in the corresponding FL block of
+                        *      the ILT client is set to 0 - No ILT lines are
+                        *      provisioned and no ILT memory allocated.
+                        *
+                        * 2.   The start-line of said block is set to the
+                        *      start line of the matching working memory
+                        *      block in the ILT client. This is later used to
+                        *      configure the CDU segment offset registers and
+                        *      results in an FL command for TIDs of this
+                        *      segment behaves as regular load commands
+                        *      (loading TIDs from the working memory).
+                        */
+                       line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+                       ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+                       continue;
+               }
+               total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+               ecore_ilt_cli_blk_fill(p_cli, p_blk,
+                                      curr_line, total,
+                                      p_mngr->task_type_size[p_seg->type]);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_CDUT);
+       }
+       p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+       /* CDUT VF */
+       p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+       if (p_seg && p_seg->count) {
+               /* Strictly speaking we need to iterate over all VF
+                * task segment types, but a VF has only 1 segment
+                */
+
+               /* 'working' memory */
+               total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+               p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk,
+                                      curr_line, total,
+                                      p_mngr->task_type_size[p_seg->type]);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_CDUT);
+
+               /* 'init' memory */
+               p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+               if (!p_seg->has_fl_mem) {
+                       /* see comment above */
+                       line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+                       ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+               } else {
+                       task_size = p_mngr->task_type_size[p_seg->type];
+                       ecore_ilt_cli_blk_fill(p_cli, p_blk,
+                                              curr_line, total, task_size);
+                       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                              ILT_CLI_CDUT);
+               }
+               p_cli->vf_total_lines = curr_line -
+                   p_cli->vf_blks[0].start_line;
+
+               /* Now for the rest of the VFs */
+               for (i = 1; i < p_mngr->vf_count; i++) {
+                       p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+                       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                              ILT_CLI_CDUT);
+
+                       p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+                       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                              ILT_CLI_CDUT);
+               }
+       }
+
+       /* QM */
+       p_cli = &p_mngr->clients[ILT_CLI_QM];
+       p_blk = &p_cli->pf_blks[0];
+
+       ecore_cxt_qm_iids(p_hwfn, &qm_iids);
+       total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+                                    qm_iids.vf_cids, qm_iids.tids,
+                                    p_hwfn->qm_info.num_pqs,
+                                    p_hwfn->qm_info.num_vf_pqs);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
+                  " num_vf_pqs=%d, memory_size=%d)\n",
+                  qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
+                  p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
+
+       ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
+                              QM_PQ_ELEMENT_SIZE);
+
+       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+       p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+       /* SRC */
+       p_cli = &p_mngr->clients[ILT_CLI_SRC];
+       ecore_cxt_src_iids(p_mngr, &src_iids);
+
+       /* Both the PF and VFs searcher connections are stored in the per PF
+        * database. Thus sum the PF searcher cids and all the VFs searcher
+        * cids.
+        */
+       total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+       if (total) {
+               u32 local_max = OSAL_MAX_T(u32, total,
+                                          SRC_MIN_NUM_ELEMS);
+
+               total = OSAL_ROUNDUP_POW_OF_TWO(local_max);
+
+               p_blk = &p_cli->pf_blks[0];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                      total * sizeof(struct src_ent),
+                                      sizeof(struct src_ent));
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_SRC);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+       }
+
+       /* TM PF */
+       p_cli = &p_mngr->clients[ILT_CLI_TM];
+       ecore_cxt_tm_iids(p_mngr, &tm_iids);
+       total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+       if (total) {
+               p_blk = &p_cli->pf_blks[0];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                      total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_TM);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+       }
+
+       /* TM VF */
+       total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+       if (total) {
+               p_blk = &p_cli->vf_blks[0];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                      total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_TM);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+               for (i = 1; i < p_mngr->vf_count; i++) {
+                       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                              ILT_CLI_TM);
+               }
+       }
+
+       /* Ensure the computed layout fits this PF's ILT line budget */
+       if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+           RESC_NUM(p_hwfn, ECORE_ILT)) {
+               DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+                      curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Release the searcher T2 DMA pages and the page-descriptor array. */
+static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 page;
+
+       /* Nothing allocated - nothing to release */
+       if (!p_mngr->t2)
+               return;
+
+       /* Free every successfully allocated page; after a partial
+        * allocation the trailing entries have a NULL virtual address.
+        */
+       for (page = 0; page < p_mngr->t2_num_pages; page++) {
+               struct ecore_dma_mem *p_page = &p_mngr->t2[page];
+
+               if (p_page->p_virt)
+                       OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                              p_page->p_virt,
+                                              p_page->p_phys,
+                                              p_page->size);
+       }
+
+       OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
+       p_mngr->t2 = OSAL_NULL;
+}
+
+/* Allocate the searcher T2 table: a set of DMA pages holding one
+ * struct src_ent per searcher connection (PF + all VFs).  Each entry's
+ * 'next' field is written as a big-endian physical pointer to the next
+ * entry, forming one continuous chain across all pages;
+ * first_free/last_free record the chain's physical bounds.
+ */
+static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 conn_num, total_size, ent_per_page, psz, i;
+       struct ecore_ilt_client_cfg *p_src;
+       struct ecore_src_iids src_iids;
+       struct ecore_dma_mem *p_t2;
+       enum _ecore_status_t rc;
+
+       OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+
+       /* if the SRC ILT client is inactive - there are no connections
+        * requiring the searcher, leave.
+        */
+       p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+       if (!p_src->active)
+               return ECORE_SUCCESS;
+
+       /* One src_ent per searcher connection - PF plus all VFs */
+       ecore_cxt_src_iids(p_mngr, &src_iids);
+       conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+       total_size = conn_num * sizeof(struct src_ent);
+
+       /* use the same page size as the SRC ILT client */
+       psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+       p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+
+       /* allocate t2 (the page-descriptor array) */
+       p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                p_mngr->t2_num_pages *
+                                sizeof(struct ecore_dma_mem));
+       if (!p_mngr->t2) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
+               rc = ECORE_NOMEM;
+               goto t2_fail;
+       }
+
+       /* allocate t2 pages */
+       for (i = 0; i < p_mngr->t2_num_pages; i++) {
+               /* the last page may be only partially used */
+               u32 size = OSAL_MIN_T(u32, total_size, psz);
+               void **p_virt = &p_mngr->t2[i].p_virt;
+
+               *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                                 &p_mngr->t2[i].p_phys, size);
+               if (!p_mngr->t2[i].p_virt) {
+                       rc = ECORE_NOMEM;
+                       goto t2_fail;
+               }
+               OSAL_MEM_ZERO(*p_virt, size);
+               p_mngr->t2[i].size = size;
+               total_size -= size;
+       }
+
+       /* Set the t2 pointers */
+
+       /* entries per page - must be a power of two */
+       ent_per_page = psz / sizeof(struct src_ent);
+
+       /* physical address of the chain's first entry */
+       p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;
+
+       /* physical address of the chain's last ((conn_num - 1)'th) entry */
+       p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
+       p_mngr->last_free = (u64)p_t2->p_phys +
+           ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
+       /* Chain the entries: within a page each entry points to the next;
+        * a page's last entry points to the first entry of the following
+        * page, and the final entry terminates the chain with 0.  Values
+        * are stored big-endian (OSAL_CPU_TO_BE64).
+        */
+       for (i = 0; i < p_mngr->t2_num_pages; i++) {
+               u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
+               struct src_ent *entries = p_mngr->t2[i].p_virt;
+               u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
+               u32 j;
+
+               for (j = 0; j < ent_num - 1; j++) {
+                       val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+                       entries[j].next = OSAL_CPU_TO_BE64(val);
+               }
+
+               if (i < p_mngr->t2_num_pages - 1)
+                       val = (u64)p_mngr->t2[i + 1].p_phys;
+               else
+                       val = 0;
+               entries[j].next = OSAL_CPU_TO_BE64(val);
+
+               conn_num -= ent_per_page;
+       }
+
+       return ECORE_SUCCESS;
+
+t2_fail:
+       ecore_cxt_src_t2_free(p_hwfn);
+       return rc;
+}
+
+/* Total number of ILT lines used by this PF */
+static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
+{
+       u32 lines = 0;
+       u32 cli;
+
+       /* Sum the inclusive [first, last] line span of each active client */
+       for (cli = 0; cli < ILT_CLI_MAX; cli++) {
+               if (ilt_clients[cli].active)
+                       lines += ilt_clients[cli].last.val -
+                                ilt_clients[cli].first.val + 1;
+       }
+
+       return lines;
+}
+
+/* Free all ILT shadow DMA pages and the shadow descriptor array itself. */
+static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 ilt_size, i;
+
+       ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
+
+       for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+               struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+               if (p_dma->p_virt)
+                       OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                              p_dma->p_virt,
+                                              p_dma->p_phys, p_dma->size);
+               p_dma->p_virt = OSAL_NULL;
+       }
+       OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
+       /* Clear the pointer so a second call - e.g. the error path of
+        * ecore_cxt_tables_alloc() runs this once via ilt_shadow_fail and
+        * again via ecore_cxt_mngr_free() - does not walk and free a stale
+        * array.  Mirrors ecore_cxt_src_t2_free().
+        */
+       p_mngr->ilt_shadow = OSAL_NULL;
+}
+
+/* Back one ILT client block with DMA pages and record each page in the
+ * ILT shadow.  start_line_offset shifts the target lines for per-VF
+ * block copies (vf_total_lines * VF index, see ecore_ilt_shadow_alloc()).
+ */
+static enum _ecore_status_t
+ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
+                   struct ecore_ilt_cli_blk *p_blk,
+                   enum ilt_clients ilt_client, u32 start_line_offset)
+{
+       struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+       u32 lines, line, sz_left, lines_to_skip = 0;
+
+       /* Special handling for RoCE that supports dynamic allocation */
+       if (ilt_client == ILT_CLI_CDUT)
+               return ECORE_SUCCESS;
+
+       lines_to_skip = p_blk->dynamic_line_cnt;
+
+       if (!p_blk->total_size)
+               return ECORE_SUCCESS;
+
+       /* Lines needed by the block, minus those left to dynamic
+        * allocation; 'line' is relative to this PF's shadow start.
+        */
+       sz_left = p_blk->total_size;
+       lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
+       line = p_blk->start_line + start_line_offset -
+           p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
+
+       for (; lines; lines--) {
+               dma_addr_t p_phys;
+               void *p_virt;
+               u32 size;
+
+               /* the block's last page may be only partially used */
+               size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
+
+/* @DPDK */
+#define ILT_BLOCK_ALIGN_SIZE 0x1000
+               p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
+                                                        &p_phys, size,
+                                                        ILT_BLOCK_ALIGN_SIZE);
+               if (!p_virt)
+                       return ECORE_NOMEM;
+               OSAL_MEM_ZERO(p_virt, size);
+
+               ilt_shadow[line].p_phys = p_phys;
+               ilt_shadow[line].p_virt = p_virt;
+               ilt_shadow[line].size = size;
+
+               /* NOTE(review): "%lx" with a u64 argument is only correct
+                * where unsigned long is 64-bit - confirm for 32-bit builds.
+                */
+               DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                          "ILT shadow: Line [%d] Physical 0x%lx "
+                          "Virtual %p Size %d\n",
+                          line, (u64)p_phys, p_virt, size);
+
+               sz_left -= size;
+               line++;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate the ILT shadow descriptor array and back every active
+ * client's PF and per-VF blocks with DMA memory.
+ */
+static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_ilt_client_cfg *clients = p_mngr->clients;
+       struct ecore_ilt_cli_blk *p_blk;
+       enum _ecore_status_t rc;
+       u32 size, i, j, k;
+
+       /* One DMA descriptor per ILT line used by this PF */
+       size = ecore_cxt_ilt_shadow_size(clients);
+       p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                        size * sizeof(struct ecore_dma_mem));
+
+       if (!p_mngr->ilt_shadow) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate ilt shadow table");
+               rc = ECORE_NOMEM;
+               goto ilt_shadow_fail;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "Allocated 0x%x bytes for ilt shadow\n",
+                  (u32)(size * sizeof(struct ecore_dma_mem)));
+
+       for (i = 0; i < ILT_CLI_MAX; i++) {
+               if (!clients[i].active)
+                       continue;
+
+               /* PF blocks first... */
+               for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+                       p_blk = &clients[i].pf_blks[j];
+                       rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+                       if (rc != ECORE_SUCCESS)
+                               goto ilt_shadow_fail;
+               }
+
+               /* ...then each VF's blocks, at that VF's line offset */
+               for (k = 0; k < p_mngr->vf_count; k++) {
+                       for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+                               u32 lines = clients[i].vf_total_lines * k;
+
+                               p_blk = &clients[i].vf_blks[j];
+                               rc = ecore_ilt_blk_alloc(p_hwfn, p_blk,
+                                                        i, lines);
+                               if (rc != ECORE_SUCCESS)
+                                       goto ilt_shadow_fail;
+                       }
+               }
+       }
+
+       return ECORE_SUCCESS;
+
+ilt_shadow_fail:
+       ecore_ilt_shadow_free(p_hwfn);
+       return rc;
+}
+
+/* Release every connection-type cid bitmap and reset its bookkeeping. */
+static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
+               /* Clear the stale pointer - this function runs both on the
+                * ecore_cid_map_alloc() error path and again from
+                * ecore_cxt_mngr_free() (via tables_alloc_fail), so a
+                * dangling map would otherwise be freed twice.
+                */
+               p_mngr->acquired[type].cid_map = OSAL_NULL;
+               p_mngr->acquired[type].max_count = 0;
+               p_mngr->acquired[type].start_cid = 0;
+       }
+}
+
+/* Allocate one acquired-cid bitmap per connection type and assign each
+ * type its contiguous cid range, starting from cid 0.
+ */
+static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 start_cid = 0;
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               u32 cid_cnt = p_mngr->conn_cfg[type].cid_count;
+               u32 map_size;
+
+               if (!cid_cnt)
+                       continue;
+
+               /* One bit per cid, rounded up to whole map words */
+               map_size = MAP_WORD_SIZE *
+                          DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
+               p_mngr->acquired[type].cid_map = OSAL_ZALLOC(p_hwfn->p_dev,
+                                                            GFP_KERNEL,
+                                                            map_size);
+               if (!p_mngr->acquired[type].cid_map)
+                       goto cid_map_fail;
+
+               p_mngr->acquired[type].max_count = cid_cnt;
+               p_mngr->acquired[type].start_cid = start_cid;
+
+               p_mngr->conn_cfg[type].cid_start = start_cid;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+                          "Type %08x start: %08x count %08x\n",
+                          type, p_mngr->acquired[type].start_cid,
+                          p_mngr->acquired[type].max_count);
+               start_cid += cid_cnt;
+       }
+
+       return ECORE_SUCCESS;
+
+cid_map_fail:
+       ecore_cid_map_free(p_hwfn);
+       return ECORE_NOMEM;
+}
+
+/* Allocate the context manager and perform its static initialization:
+ * bind each ILT client to its PSWRQ2 configuration registers, set the
+ * default ILT page size, the CDUT task sizes and the VF count.
+ */
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr;
+       u32 i;
+
+       p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
+       if (!p_mngr) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_cxt_mngr'\n");
+               return ECORE_NOMEM;
+       }
+
+       /* Initialize ILT client registers */
+       p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+       p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+       p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+       p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+       p_mngr->clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+       p_mngr->clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+       p_mngr->clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+       p_mngr->clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+       p_mngr->clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+       p_mngr->clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+       /* default ILT page size for all clients is 32K */
+       for (i = 0; i < ILT_CLI_MAX; i++)
+               p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+       /* Initialize task sizes (used for the CDUT segment parameters) */
+       p_mngr->task_type_size[0] = 512;        /* @DPDK */
+       p_mngr->task_type_size[1] = 128;        /* @DPDK */
+
+       p_mngr->vf_count = p_hwfn->p_dev->sriov_info.total_vfs;
+       /* Set the cxt mngr pointer prior to further allocations */
+       p_hwfn->p_cxt_mngr = p_mngr;
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate all runtime context-manager tables - the ILT shadow, the
+ * searcher T2 chain and the per-type cid bitmaps.  On any failure,
+ * everything allocated so far is torn down.
+ */
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
+{
+       enum _ecore_status_t rc;
+
+       rc = ecore_ilt_shadow_alloc(p_hwfn);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
+               goto alloc_fail;
+       }
+
+       rc = ecore_cxt_src_t2_alloc(p_hwfn);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
+               goto alloc_fail;
+       }
+
+       rc = ecore_cid_map_alloc(p_hwfn);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
+               goto alloc_fail;
+       }
+
+       return ECORE_SUCCESS;
+
+alloc_fail:
+       ecore_cxt_mngr_free(p_hwfn);
+       return rc;
+}
+
+/* Tear down the context manager: cid maps, T2 chain, ILT shadow, then
+ * the manager structure itself.  Safe to call when nothing is allocated.
+ */
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+
+       if (p_mngr == OSAL_NULL)
+               return;
+
+       ecore_cid_map_free(p_hwfn);
+       ecore_cxt_src_t2_free(p_hwfn);
+       ecore_ilt_shadow_free(p_hwfn);
+       OSAL_FREE(p_hwfn->p_dev, p_mngr);
+
+       p_hwfn->p_cxt_mngr = OSAL_NULL;
+}
+
+/* Clear the acquired-cid bitmaps so every cid appears free again. */
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       int type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               u32 cid_cnt = p_mngr->conn_cfg[type].cid_count;
+               u32 words, word;
+
+               if (!cid_cnt)
+                       continue;
+
+               words = DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
+               for (word = 0; word < words; word++)
+                       p_mngr->acquired[type].cid_map[word] = 0;
+       }
+}
+
+/* HW initialization helper (per Block, per phase) */
+
+/* CDU Common */
+/* Accessors for the context-size, block-waste and
+ * number-of-connections-in-block (NCIB) fields of
+ * CDU_REG_CID_ADDR_PARAMS - see ecore_cdu_init_common().
+ */
+#define CDUC_CXT_SIZE_SHIFT                                            \
+       CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK                                             \
+       (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT                                         \
+       CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK                                          \
+       (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT                                                        \
+       CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK                                                 \
+       (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+/* Same fields for type-0 task contexts (CDU_REG_SEGMENT0_PARAMS) */
+#define CDUT_TYPE0_CXT_SIZE_SHIFT                                      \
+       CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK                                       \
+       (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >>                         \
+       CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT                                   \
+       CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK                                    \
+       (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >>                  \
+       CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT                                          \
+       CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK                                           \
+       (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >>                \
+       CDUT_TYPE0_NCIB_SHIFT)
+
+/* Same fields for type-1 task contexts (CDU_REG_SEGMENT1_PARAMS) */
+#define CDUT_TYPE1_CXT_SIZE_SHIFT                                      \
+       CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK                                       \
+       (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >>                         \
+       CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT                                   \
+       CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK                                    \
+       (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >>                  \
+       CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT                                          \
+       CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK                                           \
+       (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >>                \
+       CDUT_TYPE1_NCIB_SHIFT)
+
+/* Program the common CDU parameters: context/task size, per-block waste
+ * and elements-per-block for connection contexts (CDUC) and for type-0
+ * and type-1 task contexts (CDUT).
+ */
+static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
+{
+       u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+       /* CDUC - connection configuration */
+       page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+       cxt_size = CONN_CXT_SIZE(p_hwfn);
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+       /* bytes left unused at the end of each ILT page */
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+       SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+       SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+       /* CDUT - type-0 tasks configuration */
+       page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+       cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       /* cxt size and block-waste are multiples of 8 */
+       cdu_params = 0;
+       SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+       /* CDUT - type-1 tasks configuration; page_sz intentionally still
+        * holds the CDUT client's page size from above.
+        */
+       cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       /* cxt size and block-waste are multiples of 8 */
+       cdu_params = 0;
+       SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+/* Fields of the per-segment CDU type/offset value programmed by
+ * ecore_cdu_init_pf(): a 1-bit segment type plus the segment offset.
+ */
+#define CDU_SEG_REG_TYPE_SHIFT         CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK          0x1
+#define CDU_SEG_REG_OFFSET_SHIFT       0
+#define CDU_SEG_REG_OFFSET_MASK                CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+/* Program the per-PF CDU segment type/offset registers (regular and FL
+ * variants) for every configured CDUT task segment.
+ */
+static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_tid_seg *p_seg;
+       u32 cdu_seg_params, offset;
+       int i;
+
+       static const u32 rt_type_offset_arr[] = {
+               CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+       };
+
+       static const u32 rt_type_offset_fl_arr[] = {
+               CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+       };
+
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+       /* There are initializations only for CDUT during pf Phase */
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               /* Skip segments with no TID configuration */
+               p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg)
+                       continue;
+
+               /* Note: start_line is already adjusted for the CDU
+                * segment register granularity, so we just need to
+                * divide. Adjustment is implicit as we assume ILT
+                * Page size is larger than 32K!
+                */
+               offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+                         (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+                          p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+               cdu_seg_params = 0;
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+               STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+               /* Same computation for the segment's FL counterpart */
+               offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+                         (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+                          p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+               cdu_seg_params = 0;
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+               STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+       }
+}
+
+/* Write the per-PF QM runtime configuration derived from the iid counts
+ * and the previously computed qm_info resources.
+ */
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+       struct ecore_qm_iids iids;
+
+       OSAL_MEM_ZERO(&iids, sizeof(iids));
+       ecore_cxt_qm_iids(p_hwfn, &iids);
+
+       /* The PF-only pq count excludes the VF pqs at the range's end */
+       ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
+                           p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
+                           p_hwfn->first_on_engine,
+                           iids.cids, iids.vf_cids, iids.tids,
+                           qm_info->start_pq,
+                           qm_info->num_pqs - qm_info->num_vf_pqs,
+                           qm_info->num_vf_pqs,
+                           qm_info->start_vport,
+                           qm_info->num_vports, qm_info->pf_wfq,
+                           qm_info->pf_rl, qm_info->qm_pq_params,
+                           qm_info->qm_vport_params);
+}
+
+/* CM PF */
+static enum _ecore_status_t ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       union ecore_qm_pq_params pq_params;
+       u16 pure_lb_pq;
+
+       /* Map the XCM pure-LB queue onto the CORE protocol's LB-TC pq */
+       OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+       pq_params.core.tc = LB_TC;
+       pure_lb_pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+       STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pure_lb_pq);
+
+       return ECORE_SUCCESS;
+}
+
+/* DQ PF */
+static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       static const u32 pf_icid_rt[] = {
+               DORQ_REG_PF_MAX_ICID_0_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_1_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_2_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_3_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_4_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_5_RT_OFFSET
+       };
+       static const u32 vf_icid_rt[] = {
+               DORQ_REG_VF_MAX_ICID_0_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_1_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_2_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_3_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_4_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_5_RT_OFFSET
+       };
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
+       u32 type;
+
+       /* Each register holds the running cid upper bound (in DQ_RANGE
+        * units) accumulated over connection types 0..type.
+        */
+       for (type = 0; type < sizeof(pf_icid_rt) / sizeof(pf_icid_rt[0]);
+            type++) {
+               dq_pf_max_cid +=
+                   p_mngr->conn_cfg[type].cid_count >> DQ_RANGE_SHIFT;
+               STORE_RT_REG(p_hwfn, pf_icid_rt[type], dq_pf_max_cid);
+
+               dq_vf_max_cid +=
+                   p_mngr->conn_cfg[type].cids_per_vf >> DQ_RANGE_SHIFT;
+               STORE_RT_REG(p_hwfn, vf_icid_rt[type], dq_vf_max_cid);
+       }
+
+       /* Connection types 6 & 7 are not in use, yet they must be configured
+        * as the highest possible connection. Not configuring them means the
+        * defaults will be  used, and with a large number of cids a bug may
+        * occur, if the defaults will be smaller than dq_pf_max_cid /
+        * dq_vf_max_cid.
+        */
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
+}
+
+/* Program each active ILT client's first/last line and page size into
+ * the runtime array.
+ */
+static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *p_cli;
+       int i;
+
+       for (i = 0; i < ILT_CLI_MAX; i++) {
+               p_cli = &p_hwfn->p_cxt_mngr->clients[i];
+               if (!p_cli->active)
+                       continue;
+
+               STORE_RT_REG(p_hwfn, p_cli->first.reg, p_cli->first.val);
+               STORE_RT_REG(p_hwfn, p_cli->last.reg, p_cli->last.val);
+               STORE_RT_REG(p_hwfn, p_cli->p_size.reg, p_cli->p_size.val);
+       }
+}
+
+/* Program the PSWRQ2 VF ILT boundaries plus the per-client VF block
+ * factor and PF/VF block counts for the clients with per-VF memory
+ * (CDUC, CDUT, TM).
+ */
+static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *p_cli;
+       u32 blk_factor;
+
+       /* For simplicity the 'block' is defined as one ILT page */
+       STORE_RT_REG(p_hwfn,
+                    PSWRQ2_REG_VF_BASE_RT_OFFSET,
+                    p_hwfn->hw_info.first_vf_in_pf);
+       STORE_RT_REG(p_hwfn,
+                    PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+                    p_hwfn->hw_info.first_vf_in_pf +
+                    p_hwfn->p_dev->sriov_info.total_vfs);
+
+       /* CDUC client */
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+       if (p_cli->active) {
+               /* block factor is log2 of the ILT page size in KB */
+               blk_factor =
+                   OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+                            blk_factor);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+                            p_cli->pf_total_lines);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+                            p_cli->vf_total_lines);
+       }
+
+       /* CDUT client */
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+       if (p_cli->active) {
+               blk_factor =
+                   OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+                            blk_factor);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+                            p_cli->pf_total_lines);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+                            p_cli->vf_total_lines);
+       }
+
+       /* TM client */
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+       if (p_cli->active) {
+               blk_factor =
+                   OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+                            p_cli->pf_total_lines);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+                            p_cli->vf_total_lines);
+       }
+}
+
+/* ILT (PSWRQ2) PF */
+static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *clients;
+       struct ecore_cxt_mngr *p_mngr;
+       struct ecore_dma_mem *p_shdw;
+       u32 line, rt_offst, i;
+
+       ecore_ilt_bounds_init(p_hwfn);
+       ecore_ilt_vf_bounds_init(p_hwfn);
+
+       p_mngr = p_hwfn->p_cxt_mngr;
+       p_shdw = p_mngr->ilt_shadow;
+       clients = p_mngr->clients;
+
+       for (i = 0; i < ILT_CLI_MAX; i++) {
+               u32 last_line;
+
+               if (!clients[i].active)
+                       continue;
+
+               /* Client's 1st val and RT array are absolute, ILT shadows'
+                * lines are relative.
+                */
+               line = clients[i].first.val - p_mngr->pf_start_line;
+               last_line = clients[i].last.val - p_mngr->pf_start_line;
+               rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+                          clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+               for (; line <= last_line;
+                    line++, rt_offst += ILT_ENTRY_IN_REGS) {
+                       u64 ilt_hw_entry = 0;
+
+                       /* p_virt could be OSAL_NULL in case of dynamic
+                        * allocation
+                        */
+                       if (p_shdw[line].p_virt != OSAL_NULL) {
+                               SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+                               SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+                                         (p_shdw[line].p_phys >> 12));
+
+                               DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                                       "Setting RT[0x%08x] from"
+                                       " ILT[0x%08x] [Client is %d] to"
+                                       " Physical addr: 0x%lx\n",
+                                       rt_offst, line, i,
+                                       (u64)(p_shdw[line].p_phys >> 12));
+                       }
+
+                       STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+               }
+       }
+}
+
+/* SRC (Searcher) PF */
+static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_src_iids src_iids;
+       u32 total_conns, clamped_conns, pow2_conns;
+
+       OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+       ecore_cxt_src_iids(p_mngr, &src_iids);
+
+       /* PF connections plus the per-VF quota for every VF of this PF */
+       total_conns = src_iids.pf_cids +
+           src_iids.per_vf_cids * p_mngr->vf_count;
+       if (total_conns == 0)
+               return;
+
+       /* Hash sizing - clamp up to the minimum element count, then round
+        * to a power of two so the hash-bits register is an exact log2.
+        */
+       clamped_conns = OSAL_MAX_T(u32, total_conns, SRC_MIN_NUM_ELEMS);
+       pow2_conns = OSAL_ROUNDUP_POW_OF_TWO(clamped_conns);
+
+       STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, total_conns);
+       STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+                    OSAL_LOG2(pow2_conns));
+
+       STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+                        p_mngr->first_free);
+       STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+                        p_mngr->last_free);
+}
+
+/* Timers PF */
+/* Field layout of the 64-bit connection-memory config word
+ * (TM_REG_CONFIG_CONN_MEM).
+ */
+#define TM_CFG_NUM_IDS_SHIFT           0
+#define TM_CFG_NUM_IDS_MASK            0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT   16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK    0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT         25
+#define TM_CFG_PARENT_PF_MASK          0x7ULL
+
+/* Bits 30+ differ between the connection-memory and the task-memory
+ * config words, so CID_PRE_SCAN_ROWS (conn word) and TID_OFFSET (task
+ * word) intentionally share shift 30.
+ */
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK  0x1FFULL
+
+/* Field layout of the task-memory config word (TM_REG_CONFIG_TASK_MEM),
+ * bits 0..29 are shared with the connection-memory word above.
+ */
+#define TM_CFG_TID_OFFSET_SHIFT                30
+#define TM_CFG_TID_OFFSET_MASK         0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK  0x1FFULL
+
+/* Timers (TM) PF runtime init - programs the per-VF and PF connection
+ * and task timer configuration words and the PF scan-enable bits.
+ */
+static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 active_seg_mask = 0, tm_offset, rt_reg;
+       struct ecore_tm_iids tm_iids;
+       u64 cfg_word;
+       u8 i;
+
+       OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+       ecore_cxt_tm_iids(p_mngr, &tm_iids);
+
+       /* @@@TBD No pre-scan for now */
+
+       /* Connection timers - one config word per VF.
+        * Note: We assume consecutive VFs for a PF
+        */
+       for (i = 0; i < p_mngr->vf_count; i++) {
+               cfg_word = 0;
+               SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+               SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+               SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+               SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+
+               /* Each 64-bit config word occupies two 32-bit RT entries */
+               rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+                   (sizeof(cfg_word) / sizeof(u32)) *
+                   (p_hwfn->hw_info.first_vf_in_pf + i);
+               STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+       }
+
+       /* The PF connection entry is located after all VF entries */
+       cfg_word = 0;
+       SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+       SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+       SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);       /* n/a for PF */
+       SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+
+       rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+           (sizeof(cfg_word) / sizeof(u32)) *
+           (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
+       STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+       /* enable scan */
+       STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+                    tm_iids.pf_cids ? 0x1 : 0x0);
+
+       /* @@@TBD how to enable the scan for the VFs */
+
+       /* Task timers - per-VF tids start right after the per-VF cids */
+       tm_offset = tm_iids.per_vf_cids;
+
+       /* Note: We assume consecutive VFs for a PF */
+       for (i = 0; i < p_mngr->vf_count; i++) {
+               cfg_word = 0;
+               SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+               SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+               SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+               SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+               SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
+
+               rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+                   (sizeof(cfg_word) / sizeof(u32)) *
+                   (p_hwfn->hw_info.first_vf_in_pf + i);
+
+               STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+       }
+
+       /* PF task segments - tid offsets accumulate per segment, starting
+        * after the PF cids.
+        */
+       tm_offset = tm_iids.pf_cids;
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               cfg_word = 0;
+               SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+               SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+               SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+               SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+               SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
+
+               rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+                   (sizeof(cfg_word) / sizeof(u32)) *
+                   (NUM_OF_VFS(p_hwfn->p_dev) +
+                    p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+               STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+               /* Only segments that actually have tids take part in the
+                * task scan.
+                */
+               active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+               tm_offset += tm_iids.pf_tids[i];
+       }
+
+       STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+       /* @@@TBD how to enable the scan for the VFs */
+}
+
+/* PRS (Parser) common-phase init - currently an empty placeholder; no
+ * parser runtime configuration is done by this base driver yet.
+ */
+static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn)
+{
+}
+
+/* Common-phase (per path) context HW init */
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
+{
+       /* CDU configuration */
+       ecore_cdu_init_common(p_hwfn);
+       ecore_prs_init_common(p_hwfn);
+}
+
+/* PF-phase context HW init - fills the runtime array for all the
+ * context-related blocks of this PF (QM, CM, DQ, CDU, ILT, SRC, TM).
+ */
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       ecore_qm_init_pf(p_hwfn);
+       ecore_cm_init_pf(p_hwfn);
+       ecore_dq_init_pf(p_hwfn);
+       ecore_cdu_init_pf(p_hwfn);
+       ecore_ilt_init_pf(p_hwfn);
+       ecore_src_init_pf(p_hwfn);
+       ecore_tm_init_pf(p_hwfn);
+}
+
+/* Hand out the first free cid of the given protocol's range */
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+                                          enum protocol_type type, u32 *p_cid)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_cid_acquired_map *p_map;
+       u32 rel_cid;
+
+       if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+               DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+               return ECORE_INVAL;
+       }
+
+       p_map = &p_mngr->acquired[type];
+
+       /* First clear bit in the map is the first free relative cid */
+       rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map, p_map->max_count);
+       if (rel_cid >= p_map->max_count) {
+               DP_NOTICE(p_hwfn, false, "no CID available for protocol %d",
+                         type);
+               return ECORE_NORESOURCES;
+       }
+
+       /* Mark it taken and translate to an absolute cid */
+       OSAL_SET_BIT(rel_cid, p_map->cid_map);
+       *p_cid = p_map->start_cid + rel_cid;
+
+       return ECORE_SUCCESS;
+}
+
+/* Returns true when 'cid' belongs to an allocated protocol range and its
+ * bit is set in that range's map; *p_type gets the matching protocol.
+ */
+static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
+                                       u32 cid, enum protocol_type *p_type)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_cid_acquired_map *p_map = OSAL_NULL;
+       enum protocol_type proto;
+       u32 rel_cid;
+
+       /* Find the protocol whose cid range contains this cid */
+       for (proto = 0; proto < MAX_CONN_TYPES; proto++) {
+               p_map = &p_mngr->acquired[proto];
+               if (p_map->cid_map && cid >= p_map->start_cid &&
+                   cid < p_map->start_cid + p_map->max_count)
+                       break;
+       }
+       *p_type = proto;
+
+       if (proto == MAX_CONN_TYPES) {
+               DP_NOTICE(p_hwfn, true, "Invalid CID %d", cid);
+               return false;
+       }
+
+       /* Verify the cid was actually handed out */
+       rel_cid = cid - p_map->start_cid;
+       if (!OSAL_TEST_BIT(rel_cid, p_map->cid_map)) {
+               DP_NOTICE(p_hwfn, true, "CID %d not acquired", cid);
+               return false;
+       }
+
+       return true;
+}
+
+/* Return a previously acquired cid to its protocol's free pool */
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       enum protocol_type type;
+       u32 rel_cid;
+
+       /* Only cids that were actually acquired may be released */
+       if (!ecore_cxt_test_cid_acquired(p_hwfn, cid, &type))
+               return;
+
+       /* Clear the bit in the matching per-protocol map */
+       rel_cid = cid - p_mngr->acquired[type].start_cid;
+       OSAL_CLEAR_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+}
+
+/* Resolves the protocol type and the virtual context pointer of an
+ * acquired cid (p_info->iid) into p_info->type / p_info->p_cxt.
+ */
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_cxt_info *p_info)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+       enum protocol_type type;
+       bool b_acquired;
+
+       /* Test acquired and find matching per-protocol map */
+       b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+       if (!b_acquired)
+               return ECORE_INVAL;
+
+       /* set the protocol type */
+       p_info->type = type;
+
+       /* compute context virtual pointer - the CDUC page size determines
+        * how many connection contexts fit in a single ILT line
+        */
+       hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+       conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+       cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+       line = p_info->iid / cxts_per_p;
+
+       /* Make sure context is allocated (dynamic allocation) */
+       if (!p_mngr->ilt_shadow[line].p_virt)
+               return ECORE_INVAL;
+
+       p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
+           p_info->iid % cxts_per_p * conn_cxt_size;
+
+       DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
+               "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+               (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
+
+       return ECORE_SUCCESS;
+}
+
+/* Registers the per-protocol connection counts the context manager will
+ * be sized for, based on the hw-function personality. Only the Ethernet
+ * personality is supported by this base driver.
+ */
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
+{
+       /* Set the number of required CORE connections */
+       u32 core_cids = 1;      /* SPQ */
+
+       ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+
+       switch (p_hwfn->hw_info.personality) {
+       case ECORE_PCI_ETH:
+               {
+                       struct ecore_eth_pf_params *p_params =
+                           &p_hwfn->pf_params.eth_pf_params;
+
+                       ecore_cxt_set_proto_cid_count(p_hwfn,
+                               PROTOCOLID_ETH,
+                               p_params->num_cons, 1); /* FIXME VF count... */
+
+                       break;
+               }
+       default:
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Fills p_info with the force-load (FL) TID memory layout of the CDUT
+ * segment of the current personality.
+ */
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_tid_mem *p_info)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 proto, seg, total_lines, i, shadow_line;
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_ilt_cli_blk *p_fl_seg;
+       struct ecore_tid_seg *p_seg_info;
+
+       /* Verify the personality. NOTE(review): the switch has no
+        * non-default cases, so the function currently always returns
+        * ECORE_INVAL here and the code below is unreachable. Any future
+        * case MUST assign 'proto' and 'seg' - they are uninitialized.
+        */
+       switch (p_hwfn->hw_info.personality) {
+       default:
+               return ECORE_INVAL;
+       }
+
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       if (!p_cli->active)
+               return ECORE_INVAL;
+
+       p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+       if (!p_seg_info->has_fl_mem)
+               return ECORE_INVAL;
+
+       /* Number of ILT lines spanned by the FL segment */
+       p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+       total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+                                  p_fl_seg->real_size_in_page);
+
+       /* Collect the virtual address of every shadow line of the segment.
+        * NOTE(review): no bound check against MAX_TID_BLOCKS - confirm
+        * total_lines cannot exceed it.
+        */
+       for (i = 0; i < total_lines; i++) {
+               shadow_line = i + p_fl_seg->start_line -
+                   p_hwfn->p_cxt_mngr->pf_start_line;
+               p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+       }
+       p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+           p_fl_seg->real_size_in_page;
+       p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+       p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+           p_info->tid_size;
+
+       return ECORE_SUCCESS;
+}
+
+/* This function is very RoCE oriented, if another protocol in the future
+ * will want this feature we'll need to modify the function to be more generic
+ */
+static enum _ecore_status_t
+ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
+                        enum ecore_cxt_elem_type elem_type,
+                        u32 start_iid, u32 count)
+{
+       u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+       u32 start_line, end_line, shadow_start_line, shadow_end_line;
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_ilt_cli_blk *p_blk;
+       u32 end_iid = start_iid + count;
+       struct ecore_ptt *p_ptt;
+       u64 ilt_hw_entry = 0;
+       u32 i;
+
+       if (elem_type == ECORE_ELEM_CXT) {
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+               elem_size = CONN_CXT_SIZE(p_hwfn);
+               p_blk = &p_cli->pf_blks[CDUC_BLK];
+       } else {
+               /* Task elements are never dynamically allocated by this
+                * base driver, so there is nothing to free; returning here
+                * also avoids using the uninitialized p_cli/p_blk/elem_size
+                * for an unsupported element type.
+                */
+               DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                          "No ILT range to free for elem type %d\n",
+                          elem_type);
+               return ECORE_SUCCESS;
+       }
+
+       /* Calculate the first/last ILT line covering [start_iid, end_iid) */
+       hw_p_size = p_cli->p_size.val;
+       elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+       start_line = p_blk->start_line + (start_iid / elems_per_p);
+       end_line = p_blk->start_line + (end_iid / elems_per_p);
+       if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+               end_line--;
+
+       shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+       shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+       /* A ptt window is needed to clear the absolute ILT entries in HW */
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_NOTICE(p_hwfn, false,
+                         "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+               return ECORE_TIMEOUT;
+       }
+
+       for (i = shadow_start_line; i < shadow_end_line; i++) {
+               if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+                       continue;
+
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
+                                      p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
+                                      p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
+
+               p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
+               p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+               p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+               /* compute absolute offset and invalidate the HW entry */
+               reg_offset = PSWRQ2_REG_ILT_MEMORY +
+                   ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+                    ILT_ENTRY_IN_REGS);
+
+               ecore_wr(p_hwfn, p_ptt, reg_offset, U64_LO(ilt_hw_entry));
+               ecore_wr(p_hwfn, p_ptt, reg_offset + ILT_REG_SIZE_IN_BYTES,
+                        U64_HI(ilt_hw_entry));
+       }
+
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return ECORE_SUCCESS;
+}
+
+/* Frees the dynamically allocated ILT pages of the given protocol's
+ * connection and task ranges.
+ * NOTE(review): ecore_cxt_free_ilt_range only implements the
+ * ECORE_ELEM_CXT element type in this version - confirm the TASK call
+ * below once task contexts become dynamically allocated.
+ */
+enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
+                                             enum protocol_type proto)
+{
+       enum _ecore_status_t rc;
+       u32 cid;
+
+       /* Free Connection CXT */
+       rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_CXT,
+                                     ecore_cxt_get_proto_cid_start(p_hwfn,
+                                                                   proto),
+                                     ecore_cxt_get_proto_cid_count(p_hwfn,
+                                                                   proto,
+                                                                   &cid));
+
+       if (rc)
+               return rc;
+
+       /* Free Task CXT */
+       rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
+                                     ecore_cxt_get_proto_tid_count(p_hwfn,
+                                                                   proto));
+
+       return rc;
+}
+
+/* Resolves the virtual address of a task context (tid) in either the
+ * working or the force-load CDUT memory into *pp_task_ctx.
+ */
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+                                           u32 tid,
+                                           u8 ctx_type, void **pp_task_ctx)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_ilt_cli_blk *p_seg;
+       struct ecore_tid_seg *p_seg_info;
+       u32 proto, seg;
+       u32 total_lines;
+       u32 tid_size, ilt_idx;
+       u32 num_tids_per_block;
+
+       /* Verify the personality. NOTE(review): no non-default cases yet,
+        * so the function currently always returns ECORE_INVAL here; any
+        * future case MUST assign 'proto' and 'seg' (uninitialized below).
+        */
+       switch (p_hwfn->hw_info.personality) {
+       default:
+               return ECORE_INVAL;
+       }
+
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       if (!p_cli->active)
+               return ECORE_INVAL;
+
+       p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+       /* Pick the working-memory or force-load block for this segment */
+       if (ctx_type == ECORE_CTX_WORKING_MEM) {
+               p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+       } else if (ctx_type == ECORE_CTX_FL_MEM) {
+               if (!p_seg_info->has_fl_mem)
+                       return ECORE_INVAL;
+               p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+       } else {
+               return ECORE_INVAL;
+       }
+       total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
+       tid_size = p_mngr->task_type_size[p_seg_info->type];
+       num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+       /* NOTE(review): this accepts tid / num_tids_per_block ==
+        * total_lines, which looks one past the last line - confirm.
+        */
+       if (total_lines < tid / num_tids_per_block)
+               return ECORE_INVAL;
+
+       ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+           p_mngr->pf_start_line;
+       *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+           (tid % num_tids_per_block) * tid_size;
+
+       return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
new file mode 100644 (file)
index 0000000..1ac95f9
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _ECORE_CID_
+#define _ECORE_CID_
+
+#include "ecore_hsi_common.h"
+#include "ecore_proto_if.h"
+#include "ecore_cxt_api.h"
+
+enum ecore_cxt_elem_type {
+       ECORE_ELEM_CXT,
+       ECORE_ELEM_TASK
+};
+
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+                                 enum protocol_type type, u32 *vf_cid);
+
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+                                 enum protocol_type type);
+
+/**
+ * @brief ecore_cxt_qm_iids - fills the cid/tid counts for the QM configuration
+ *
+ * @param p_hwfn
+ * @param iids [out], a structure holding all the counters
+ */
+void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids);
+
+/**
+ * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_set_proto_cid_count - Set the max cids per protocol for cxt
+ *        init
+ *
+ * @param p_hwfn
+ * @param type
+ * @param cid_cnt - number of pf cids
+ * @param vf_cid_cnt - number of vf cids
+ */
+void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
+                                  enum protocol_type type,
+                                  u32 cid_cnt, u32 vf_cid_cnt);
+/**
+ * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired
+ *        map
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per
+ *        path.
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
+
+ /**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt);
+
+/**
+* @brief ecore_cxt_release - Release a cid
+*
+* @param p_hwfn
+* @param cid
+*/
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief ecore_cxt_free_proto_ilt - function frees ilt pages
+ *        associated with the protocol passed.
+ *
+ * @param p_hwfn
+ * @param proto
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
+                                             enum protocol_type proto);
+
+#define ECORE_CTX_WORKING_MEM 0
+#define ECORE_CTX_FL_MEM 1
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+                                           u32 tid,
+                                           u8 ctx_type, void **task_ctx);
+
+#endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
new file mode 100644 (file)
index 0000000..d98dddb
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_CXT_API_H__
+#define __ECORE_CXT_API_H__
+
+struct ecore_hwfn;
+
+struct ecore_cxt_info {
+       void *p_cxt;
+       u32 iid;
+       enum protocol_type type;
+};
+
+#define MAX_TID_BLOCKS                 512
+struct ecore_tid_mem {
+       u32 tid_size;
+       u32 num_tids_per_block;
+       u32 waste;
+       u8 *blocks[MAX_TID_BLOCKS];     /* 4K */
+};
+
+static OSAL_INLINE void *get_task_mem(struct ecore_tid_mem *info, u32 tid)
+{
+       /* Locate the block holding this tid, then offset to the tid's
+        * slot inside it; note: waste is superfluous.
+        */
+       u32 block = tid / info->num_tids_per_block;
+       u32 slot = tid % info->num_tids_per_block;
+
+       return (void *)(info->blocks[block] + slot * info->tid_size);
+}
+
+/**
+* @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+*
+* @param p_hwfn
+* @param type
+* @param p_cid
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+                                          enum protocol_type type,
+                                          u32 *p_cid);
+
+/**
+* @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
+*
+*
+* @param p_hwfn
+* @param p_info in/out
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_cxt_info *p_info);
+
+/**
+* @brief ecore_cxt_get_tid_mem_info
+*
+* @param p_hwfn
+* @param p_info
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_tid_mem *p_info);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
new file mode 100644 (file)
index 0000000..83c126f
--- /dev/null
@@ -0,0 +1,3442 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore.h"
+#include "ecore_chain.h"
+#include "ecore_status.h"
+#include "ecore_hw.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_ops.h"
+#include "ecore_int.h"
+#include "ecore_cxt.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sp_commands.h"
+#include "ecore_dev_api.h"
+#include "ecore_mcp.h"
+#include "ecore_hw_defs.h"
+#include "mcp_public.h"
+#include "ecore_iro.h"
+#include "nvm_cfg.h"
+#include "ecore_dev_api.h"
+
+/* Configurable */
+#define ECORE_MIN_DPIS         (4)     /* The minimal number of DPIs required
+                                        * load the driver. The number was
+                                        * arbitrarily set.
+                                        */
+
+/* Derived */
+#define ECORE_MIN_PWM_REGION   ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
+
+enum BAR_ID {
+       BAR_ID_0,               /* used for GRC */
+       BAR_ID_1                /* Used for doorbells */
+};
+
+/* Returns the size in bytes of the requested BAR, either as reported by
+ * the MFW or, for older MFW versions, as a hard-coded default.
+ */
+static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
+{
+       u32 bar_reg = (bar_id == BAR_ID_0 ?
+                      PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+       u32 val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+
+       /* The above registers were updated in the past only in CMT mode. Since
+        * they were found to be useful MFW started updating them from 8.7.7.0.
+        * In older MFW versions they are set to 0 which means disabled.
+        */
+       if (!val) {
+               if (p_hwfn->p_dev->num_hwfns > 1) {
+                       DP_NOTICE(p_hwfn, false,
+                                 "BAR size not configured. Assuming BAR"
+                                 " size of 256kB for GRC and 512kB for DB\n");
+                       /* Must compare against the requested BAR; the bare
+                        * enum constant BAR_ID_0 is 0 and would always
+                        * select the DB size.
+                        */
+                       return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
+               }
+
+               DP_NOTICE(p_hwfn, false,
+                         "BAR size not configured. Assuming BAR"
+                         " size of 512kB for GRC and 512kB for DB\n");
+               return 512 * 1024;
+       }
+
+       /* Register encodes log2(size) - 15 */
+       return 1 << (val + 15);
+}
+
+/* Store the debug-print settings on the device and mirror them onto
+ * every hw-function.
+ */
+void ecore_init_dp(struct ecore_dev *p_dev,
+                  u32 dp_module, u8 dp_level, void *dp_ctx)
+{
+       u32 idx;
+
+       p_dev->dp_level = dp_level;
+       p_dev->dp_module = dp_module;
+       p_dev->dp_ctx = dp_ctx;
+
+       for (idx = 0; idx < MAX_HWFNS_PER_DEVICE; idx++) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[idx];
+
+               p_hwfn->dp_level = dp_level;
+               p_hwfn->dp_module = dp_module;
+               p_hwfn->dp_ctx = dp_ctx;
+       }
+}
+
+/* Basic per-hwfn identity setup: back-pointer, id, activity flag and
+ * the dmae mutex.
+ */
+void ecore_init_struct(struct ecore_dev *p_dev)
+{
+       u8 idx;
+
+       for (idx = 0; idx < MAX_HWFNS_PER_DEVICE; idx++) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[idx];
+
+               p_hwfn->p_dev = p_dev;
+               p_hwfn->my_id = idx;
+               p_hwfn->b_active = false;
+
+               OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+               OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
+       }
+
+       /* hwfn 0 is always active */
+       p_dev->hwfns[0].b_active = true;
+
+       /* set the default cache alignment to 128 (may be overridden later) */
+       p_dev->cache_shift = 7;
+}
+
+/* Release all QM parameter arrays and clear the stale pointers */
+static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+       OSAL_FREE(p_dev, qm_info->qm_pq_params);
+       qm_info->qm_pq_params = OSAL_NULL;
+       OSAL_FREE(p_dev, qm_info->qm_vport_params);
+       qm_info->qm_vport_params = OSAL_NULL;
+       OSAL_FREE(p_dev, qm_info->qm_port_params);
+       qm_info->qm_port_params = OSAL_NULL;
+       OSAL_FREE(p_dev, qm_info->wfq_data);
+       qm_info->wfq_data = OSAL_NULL;
+}
+
+/* Free every software resource allocated for the device and all of its
+ * hw-functions.
+ */
+void ecore_resc_free(struct ecore_dev *p_dev)
+{
+       int i;
+
+       OSAL_FREE(p_dev, p_dev->fw_data);
+       p_dev->fw_data = OSAL_NULL;
+
+       OSAL_FREE(p_dev, p_dev->reset_stats);
+       /* Clear the pointer like fw_data above to avoid a dangling
+        * reference / double free on a repeated teardown.
+        */
+       p_dev->reset_stats = OSAL_NULL;
+
+       /* First release the Rx/Tx cid tables of every hw-function... */
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
+               p_hwfn->p_tx_cids = OSAL_NULL;
+               OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
+               p_hwfn->p_rx_cids = OSAL_NULL;
+       }
+
+       /* ...then the remaining per-hwfn resources */
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               ecore_cxt_mngr_free(p_hwfn);
+               ecore_qm_info_free(p_hwfn);
+               ecore_spq_free(p_hwfn);
+               ecore_eq_free(p_hwfn, p_hwfn->p_eq);
+               ecore_consq_free(p_hwfn, p_hwfn->p_consq);
+               ecore_int_free(p_hwfn);
+               ecore_dmae_info_free(p_hwfn);
+               /* @@@TBD Flush work-queue ? */
+       }
+}
+
+static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
+                                              bool b_sleepable)
+{
+       u8 num_vports, vf_offset = 0, i, vport_id, num_ports;
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+       struct init_qm_port_params *p_qm_port;
+       u16 num_pqs, multi_cos_tcs = 1;
+       u16 num_vfs = 0;
+
+       OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
+
+#ifndef ASIC_ONLY
+       /* @TMP - Don't allocate QM queues for VFs on emulation */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false,
+                         "Emulation - skip configuring QM queues for VFs\n");
+               num_vfs = 0;
+       }
+#endif
+
+       num_pqs = multi_cos_tcs + num_vfs + 1;  /* The '1' is for pure-LB */
+       num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT);
+
+       /* Sanity checking that setup requires legal number of resources */
+       if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
+               DP_ERR(p_hwfn,
+                      "Need too many Physical queues - 0x%04x when"
+                       " only %04x are available\n",
+                      num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
+               return ECORE_INVAL;
+       }
+
+       /* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue,
+        * then special queues, then per-VF PQ.
+        */
+       qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
+                                           b_sleepable ? GFP_KERNEL :
+                                           GFP_ATOMIC,
+                                           sizeof(struct init_qm_pq_params) *
+                                           num_pqs);
+       if (!qm_info->qm_pq_params)
+               goto alloc_err;
+
+       qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev,
+                                              b_sleepable ? GFP_KERNEL :
+                                              GFP_ATOMIC,
+                                              sizeof(struct
+                                                     init_qm_vport_params) *
+                                              num_vports);
+       if (!qm_info->qm_vport_params)
+               goto alloc_err;
+
+       qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev,
+                                             b_sleepable ? GFP_KERNEL :
+                                             GFP_ATOMIC,
+                                             sizeof(struct init_qm_port_params)
+                                             * MAX_NUM_PORTS);
+       if (!qm_info->qm_port_params)
+               goto alloc_err;
+
+       qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev,
+                                       b_sleepable ? GFP_KERNEL :
+                                       GFP_ATOMIC,
+                                       sizeof(struct ecore_wfq_data) *
+                                       num_vports);
+
+       if (!qm_info->wfq_data)
+               goto alloc_err;
+
+       vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+
+       /* First init per-TC PQs */
+       for (i = 0; i < multi_cos_tcs; i++) {
+               struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+
+               if (p_hwfn->hw_info.personality == ECORE_PCI_ETH) {
+                       params->vport_id = vport_id;
+                       params->tc_id = p_hwfn->hw_info.non_offload_tc;
+                       params->wrr_group = 1;  /* @@@TBD ECORE_WRR_MEDIUM */
+               } else {
+                       params->vport_id = vport_id;
+                       params->tc_id = p_hwfn->hw_info.offload_tc;
+                       params->wrr_group = 1;  /* @@@TBD ECORE_WRR_MEDIUM */
+               }
+       }
+
+       /* Then init pure-LB PQ */
+       qm_info->pure_lb_pq = i;
+       qm_info->qm_pq_params[i].vport_id =
+           (u8)RESC_START(p_hwfn, ECORE_VPORT);
+       qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
+       qm_info->qm_pq_params[i].wrr_group = 1;
+       i++;
+
+       /* Then init per-VF PQs */
+       vf_offset = i;
+       for (i = 0; i < num_vfs; i++) {
+               /* First vport is used by the PF */
+               qm_info->qm_pq_params[vf_offset + i].vport_id = vport_id +
+                   i + 1;
+               qm_info->qm_pq_params[vf_offset + i].tc_id =
+                   p_hwfn->hw_info.non_offload_tc;
+               qm_info->qm_pq_params[vf_offset + i].wrr_group = 1;
+       };
+
+       qm_info->vf_queues_offset = vf_offset;
+       qm_info->num_pqs = num_pqs;
+       qm_info->num_vports = num_vports;
+
+       /* Initialize qm port parameters */
+       num_ports = p_hwfn->p_dev->num_ports_in_engines;
+       for (i = 0; i < num_ports; i++) {
+               p_qm_port = &qm_info->qm_port_params[i];
+               p_qm_port->active = 1;
+               if (num_ports == 4)
+                       p_qm_port->num_active_phys_tcs = 2;
+               else
+                       p_qm_port->num_active_phys_tcs = 5;
+               p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+               p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+       }
+
+       if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4))
+               qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2;
+       else
+               qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+       qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+
+       qm_info->num_vf_pqs = num_vfs;
+       qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+
+       for (i = 0; i < qm_info->num_vports; i++)
+               qm_info->qm_vport_params[i].vport_wfq = 1;
+
+       qm_info->pf_wfq = 0;
+       qm_info->pf_rl = 0;
+       qm_info->vport_rl_en = 1;
+       qm_info->vport_wfq_en = 1;
+
+       return ECORE_SUCCESS;
+
+alloc_err:
+       DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
+       ecore_qm_info_free(p_hwfn);
+       return ECORE_NOMEM;
+}
+
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate init tool in QM_PF stage
+ * 5. send an sdm_qm_cmd through rbc interface to release the QM
+ *
+ * Returns ECORE_SUCCESS on success, ECORE_INVAL if either QM stop/start
+ * command is rejected, or the status of a failed sub-step otherwise.
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt)
+{
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+       enum _ecore_status_t rc;
+       bool b_rc;
+
+       /* qm_info is allocated in ecore_init_qm_info() which is already called
+        * from ecore_resc_alloc() or previous call of ecore_qm_reconf().
+        * The allocated size may change each init, so we free it before next
+        * allocation.
+        */
+       ecore_qm_info_free(p_hwfn);
+
+       /* initialize ecore's qm data structure */
+       rc = ecore_init_qm_info(p_hwfn, false);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* stop PF's qm queues */
+       b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+                                     qm_info->start_pq, qm_info->num_pqs);
+       if (!b_rc)
+               return ECORE_INVAL;
+
+       /* clear the QM_PF runtime phase leftovers from previous init */
+       ecore_init_clear_rt_data(p_hwfn);
+
+       /* prepare QM portion of runtime array */
+       ecore_qm_init_pf(p_hwfn);
+
+       /* activate init tool on runtime array */
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+                           p_hwfn->hw_info.hw_mode);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* start PF's qm queues. Note: must test b_rc here - rc is known to
+        * be ECORE_SUCCESS at this point, so testing it would (wrongly)
+        * report failure unconditionally.
+        */
+       b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+                                     qm_info->start_pq, qm_info->num_pqs);
+       if (!b_rc)
+               return ECORE_INVAL;
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate all per-device and per-hwfn software resources: FW data,
+ * queue->CID maps, context manager, QM info, ILT tables, SPQ, interrupt
+ * structures, EQ, CONSQ and DMAE info. On any failure all partially
+ * allocated resources are released via ecore_resc_free().
+ *
+ * Returns ECORE_SUCCESS, ECORE_NOMEM on allocation failure, or the status
+ * of a failed sub-step.
+ */
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_consq *p_consq;
+       struct ecore_eq *p_eq;
+       int i;
+
+       p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+                                    sizeof(struct ecore_fw_data));
+       if (!p_dev->fw_data)
+               return ECORE_NOMEM;
+
+       /* Allocate Memory for the Queue->CID mapping */
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               /* @@@TMP - resc management, change to actual required size */
+               int tx_size = sizeof(struct ecore_hw_cid_data) *
+                   RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+               int rx_size = sizeof(struct ecore_hw_cid_data) *
+                   RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+
+               p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                               tx_size);
+               if (!p_hwfn->p_tx_cids) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to allocate memory for Tx Cids\n");
+                       goto alloc_no_mem;
+               }
+
+               p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                               rx_size);
+               if (!p_hwfn->p_rx_cids) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to allocate memory for Rx Cids\n");
+                       goto alloc_no_mem;
+               }
+       }
+
+       /* The steps below are order-dependent: each relies on sizes/values
+        * computed by its predecessors.
+        */
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               /* First allocate the context manager structure */
+               rc = ecore_cxt_mngr_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* Set the HW cid/tid numbers (in the context manager)
+                * Must be done prior to any further computations.
+                */
+               rc = ecore_cxt_set_pf_params(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* Prepare and process QM requirements */
+               rc = ecore_init_qm_info(p_hwfn, true);
+               if (rc)
+                       goto alloc_err;
+
+               /* Compute the ILT client partition */
+               rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* CID map / ILT shadow table / T2
+                * The tables sizes are determined by the computations above
+                */
+               rc = ecore_cxt_tables_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* SPQ, must follow ILT because initializes SPQ context */
+               rc = ecore_spq_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* SP status block allocation */
+               p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+                                                          RESERVED_PTT_DPC);
+
+               rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+               if (rc)
+                       goto alloc_err;
+
+               /* EQ */
+               p_eq = ecore_eq_alloc(p_hwfn, 256);
+               if (!p_eq)
+                       goto alloc_no_mem;
+               p_hwfn->p_eq = p_eq;
+
+               p_consq = ecore_consq_alloc(p_hwfn);
+               if (!p_consq)
+                       goto alloc_no_mem;
+               p_hwfn->p_consq = p_consq;
+
+               /* DMA info initialization */
+               rc = ecore_dmae_info_alloc(p_hwfn);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to allocate memory for"
+                                 " dmae_info structure\n");
+                       goto alloc_err;
+               }
+       }
+
+       p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+                                        sizeof(struct ecore_eth_stats));
+       if (!p_dev->reset_stats) {
+               DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
+               goto alloc_no_mem;
+       }
+
+       return ECORE_SUCCESS;
+
+alloc_no_mem:
+       rc = ECORE_NOMEM;
+alloc_err:
+       ecore_resc_free(p_dev);
+       return rc;
+}
+
+/* Per-hwfn setup pass run after ecore_resc_alloc(): initializes the
+ * context manager, slow-path queues (SPQ/EQ/CONSQ), the MFW mailbox
+ * shadow and the interrupt resources for every hw-function.
+ */
+void ecore_resc_setup(struct ecore_dev *p_dev)
+{
+       int idx;
+
+       for_each_hwfn(p_dev, idx) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[idx];
+
+               ecore_cxt_mngr_setup(p_hwfn);
+               ecore_spq_setup(p_hwfn);
+               ecore_eq_setup(p_hwfn, p_hwfn->p_eq);
+               ecore_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+               /* Snapshot the current MFW mailbox into the local shadow */
+               ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+               OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
+                           p_hwfn->mcp_info->mfw_mb_cur,
+                           p_hwfn->mcp_info->mfw_mb_length);
+
+               ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+       }
+}
+
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME        (10)
+/* Send a final-cleanup request for function 'id' (PF, or VF when is_vf is
+ * set - VF ids are offset by 0x10) through XSDM and poll the USDM RAM
+ * acknowledgment location until the FW signals completion.
+ *
+ * Returns ECORE_SUCCESS when the FW ack arrives within the poll budget,
+ * ECORE_TIMEOUT otherwise. Skipped (success) on non-ASIC platforms.
+ */
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        u16 id, bool is_vf)
+{
+       u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+       enum _ecore_status_t rc = ECORE_TIMEOUT;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
+           CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
+               return ECORE_SUCCESS;
+       }
+#endif
+
+       addr = GTT_BAR0_MAP_REG_USDM_RAM +
+           USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+
+       if (is_vf)
+               id += 0x10;
+
+       command |= X_FINAL_CLEANUP_AGG_INT <<
+           SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+       command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+       command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+       command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+       /* Make sure notification is not set before initiating final cleanup */
+       if (REG_RD(p_hwfn, addr)) {
+               DP_NOTICE(p_hwfn, false,
+                         "Unexpected; Found final cleanup notification "
+                         "before initiating final cleanup\n");
+               REG_WR(p_hwfn, addr, 0);
+       }
+
+       /* Fixed format string - the newline belongs after the closing
+        * bracket, not inside it.
+        */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "Sending final cleanup for PFVF[%d] [Command %08x]\n",
+                  id, OSAL_CPU_TO_LE32(command));
+
+       ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN,
+                OSAL_CPU_TO_LE32(command));
+
+       /* Poll until completion */
+       while (!REG_RD(p_hwfn, addr) && count--)
+               OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);
+
+       if (REG_RD(p_hwfn, addr))
+               rc = ECORE_SUCCESS;
+       else
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to receive FW final cleanup notification\n");
+
+       /* Cleanup afterwards */
+       REG_WR(p_hwfn, addr, 0);
+
+       return rc;
+}
+
+/* Derive the init-tool hw_mode bitmask for this hw-function from chip
+ * revision, ports-per-engine, multi-function mode and (for non-ASIC)
+ * emulation/FPGA state. The result is cached in p_hwfn->hw_info.hw_mode;
+ * on an unrecognized chip type or port count hw_mode is left unset.
+ */
+static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
+{
+       int hw_mode = 0;
+
+       switch (ECORE_GET_TYPE(p_hwfn->p_dev)) {
+       case CHIP_BB_A0:
+               hw_mode |= 1 << MODE_BB_A0;
+               break;
+       case CHIP_BB_B0:
+               hw_mode |= 1 << MODE_BB_B0;
+               break;
+       case CHIP_K2:
+               hw_mode |= 1 << MODE_K2;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Can't initialize chip ID %d\n",
+                         ECORE_GET_TYPE(p_hwfn->p_dev));
+               return;
+       }
+
+       /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
+       switch (p_hwfn->p_dev->num_ports_in_engines) {
+       case 1:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+               break;
+       case 2:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+               break;
+       case 4:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true,
+                         "num_ports_in_engine = %d not supported\n",
+                         p_hwfn->p_dev->num_ports_in_engines);
+               return;
+       }
+
+       /* Multi-function mode: NPAR/default map to switch-independent (SI),
+        * OVLAN maps to switch-dependent (SD); unknown modes fall back to SI.
+        */
+       switch (p_hwfn->p_dev->mf_mode) {
+       case ECORE_MF_DEFAULT:
+       case ECORE_MF_NPAR:
+               hw_mode |= 1 << MODE_MF_SI;
+               break;
+       case ECORE_MF_OVLAN:
+               hw_mode |= 1 << MODE_MF_SD;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true,
+                         "Unsupported MF mode, init as DEFAULT\n");
+               hw_mode |= 1 << MODE_MF_SI;
+       }
+
+#ifndef ASIC_ONLY
+       /* On emulation/FPGA builds pick the matching platform bit instead
+        * of MODE_ASIC (note the else binds to the #ifndef'd if above).
+        */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+                       hw_mode |= 1 << MODE_FPGA;
+               } else {
+                       if (p_hwfn->p_dev->b_is_emul_full)
+                               hw_mode |= 1 << MODE_EMUL_FULL;
+                       else
+                               hw_mode |= 1 << MODE_EMUL_REDUCED;
+               }
+       } else
+#endif
+               hw_mode |= 1 << MODE_ASIC;
+
+       if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
+               hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;
+
+       /* Two hw-functions on one device implies 100G (CMT) mode */
+       if (p_hwfn->p_dev->num_hwfns > 1)
+               hw_mode |= 1 << MODE_100G;
+
+       p_hwfn->hw_info.hw_mode = hw_mode;
+
+       DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
+                  "Configuring function for hw_mode: 0x%08x\n",
+                  p_hwfn->hw_info.hw_mode);
+}
+
+#ifndef ASIC_ONLY
+/* MFW-replacement initializations for non-ASIC.
+ * Performs the resets and port-mode programming that the management FW
+ * would normally do, then polls PSWRQ2 until the RBC init completes.
+ * Register values here are platform-specific magic for emulation/FPGA.
+ */
+static void ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt)
+{
+       u32 pl_hv = 1;
+       int i;
+
+       /* AH emulation needs extra reset bits released */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
+               pl_hv |= 0x600;
+
+       ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
+
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
+               ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff);
+
+       /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
+       /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
+       if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
+               ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0, 4);
+
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
+               /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+               ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+                        (p_hwfn->p_dev->num_ports_in_engines >> 1));
+
+               ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+                        p_hwfn->p_dev->num_ports_in_engines == 4 ? 0 : 3);
+       }
+
+       /* Poll on RBC; up to 100 * 50us = 5ms for config-done */
+       ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
+       for (i = 0; i < 100; i++) {
+               OSAL_UDELAY(50);
+               if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
+                       break;
+       }
+       if (i == 100)
+               DP_NOTICE(p_hwfn, true,
+                         "RBC done failed to complete in PSWRQ2\n");
+}
+#endif
+
+/* Init run time data for all PFs and their VFs on an engine.
+ * TBD - for VFs - Once we have parent PF info for each VF in
+ * shmem available as CAU requires knowledge of parent PF for each VF.
+ */
+static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
+{
+       u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+       int idx, sb_idx;
+
+       for_each_hwfn(p_dev, idx) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[idx];
+               struct ecore_igu_info *p_igu_info =
+                   p_hwfn->hw_info.p_igu_info;
+               struct cau_sb_entry sb_entry;
+
+               for (sb_idx = 0; sb_idx < ECORE_MAPPING_MEMORY_SIZE(p_dev);
+                    sb_idx++) {
+                       struct ecore_igu_block *p_block =
+                           &p_igu_info->igu_map.igu_blocks[sb_idx];
+
+                       /* Only PF-owned status blocks get a CAU runtime
+                        * entry here (VF entries are TBD - see above).
+                        */
+                       if (p_block->is_pf) {
+                               ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+                                                       p_block->function_id,
+                                                       0, 0);
+                               STORE_RT_REG_AGG(p_hwfn, offset + sb_idx * 2,
+                                                sb_entry);
+                       }
+               }
+       }
+}
+
+/* PHASE_ENGINE init: program CAU runtime data, GTT windows, common QM
+ * runtime, common context config; then run the engine-phase init tool and
+ * apply post-init workarounds (VFID validation, per-VF CCFC strong-enable).
+ * Must run once per engine before the port/PF phases.
+ */
+static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                int hw_mode)
+{
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       u8 vf_id, max_num_vfs;
+       u16 num_pfs, pf_id;
+       u32 concrete_fid;
+
+       ecore_init_cau_rt_data(p_dev);
+
+       /* Program GTT windows */
+       ecore_gtt_init(p_hwfn);
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+#endif
+
+       /* Enable PF rate-limit/WFQ only if the MFW advertises bandwidth
+        * limits for this function.
+        */
+       if (p_hwfn->mcp_info) {
+               if (p_hwfn->mcp_info->func_info.bandwidth_max)
+                       qm_info->pf_rl_en = 1;
+               if (p_hwfn->mcp_info->func_info.bandwidth_min)
+                       qm_info->pf_wfq_en = 1;
+       }
+
+       ecore_qm_common_rt_init(p_hwfn,
+                               p_hwfn->p_dev->num_ports_in_engines,
+                               qm_info->max_phys_tcs_per_port,
+                               qm_info->pf_rl_en, qm_info->pf_wfq_en,
+                               qm_info->vport_rl_en, qm_info->vport_wfq_en,
+                               qm_info->qm_port_params);
+
+       ecore_cxt_hw_init_common(p_hwfn);
+
+       /* Close gate from NIG to BRB/Storm; By default they are open, but
+        * we close them to prevent NIG from passing data to reset blocks.
+        * Should have been done in the ENGINE phase, but init-tool lacks
+        * proper port-pretend capabilities.
+        * Done for this port and (via port-pretend) for its sibling port.
+        */
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+       ecore_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+       ecore_port_unpretend(p_hwfn, p_ptt);
+
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* @@TBD MichalK - should add VALIDATE_VFID to init tool...
+        * need to decide with which value, maybe runtime
+        */
+       ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+       ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+       if (ECORE_IS_BB(p_hwfn->p_dev)) {
+               num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+               if (num_pfs == 1)
+                       return rc;
+               /* pretend to original PF */
+               ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+       }
+
+       /* Workaround for avoiding CCFC execution error when getting packets
+        * with CRC errors, and allowing instead the invoking of the FW error
+        * handler.
+        * This is not done inside the init tool since it currently can't
+        * perform a pretending to VFs.
+        */
+       max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2
+           : MAX_NUM_VFS_BB;
+       for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
+               concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
+               ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
+               ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+       }
+       /* pretend to original PF */
+       ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
+       return rc;
+}
+
+#ifndef ASIC_ONLY
+#define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4)
+#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5)
+
+#define PMEG_IF_BYTE_COUNT     8
+
+/* Issue a 64-bit write to a network-port register through the CNIG PMEG
+ * indirect interface (emulation/FPGA only): program the command and
+ * address registers, then push the data in two 32-bit halves.
+ */
+static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt,
+                            u32 addr, u64 data, u8 reg_type, u8 port)
+{
+       /* NOTE(review): the CMD value logged here omits the 0xffff00fe mask
+        * applied on the actual write below, so the trace may not match the
+        * value written - confirm whether this is intentional.
+        */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                  "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
+                  ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) |
+                  (8 << PMEG_IF_BYTE_COUNT),
+                  (reg_type << 25) | (addr << 8) | port,
+                  (u32)((data >> 32) & 0xffffffff),
+                  (u32)(data & 0xffffffff));
+
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0,
+                (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) &
+                 0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB_B0,
+                (reg_type << 25) | (addr << 8) | port);
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
+                data & 0xffffffff);
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
+                (data >> 32) & 0xffffffff);
+}
+
+#define XLPORT_MODE_REG        (0x20a)
+#define XLPORT_MAC_CONTROL (0x210)
+#define XLPORT_FLOW_CONTROL_CONFIG (0x207)
+#define XLPORT_ENABLE_REG (0x20b)
+
+#define XLMAC_CTRL (0x600)
+#define XLMAC_MODE (0x601)
+#define XLMAC_RX_MAX_SIZE (0x608)
+#define XLMAC_TX_CTRL (0x604)
+#define XLMAC_PAUSE_CTRL (0x60d)
+#define XLMAC_PFC_CTRL (0x60e)
+
+/* Emulation-only link bring-up for AH (K2): map the NIG port into the
+ * NWM, then program the per-port ETH MAC (XGMII mode, frame length, IPG,
+ * FIFO thresholds) and enable it. Register values are emulation magic.
+ */
+static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       u8 port = p_hwfn->port_id;
+       /* Per-port MAC register block base */
+       u32 mac_base = NWM_REG_MAC0 + (port << 2) * NWM_REG_MAC0_SIZE;
+
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2),
+                (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT) |
+                (port << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT)
+                | (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT));
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE,
+                1 << ETH_MAC_REG_XIF_MODE_XGMII_SHIFT);
+
+       /* Max frame length 9018 - jumbo-sized */
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH,
+                9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT);
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH,
+                0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT);
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS,
+                8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT);
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS,
+                (0xA << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT) |
+                (8 << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT));
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG, 0xa853);
+}
+
+/* Emulation-only link bring-up. Dispatches to the AH-specific flow, or
+ * programs the BB XLPORT/XLMAC blocks through the PMEG indirect interface
+ * (mode, max RX size, TX/pause/PFC control, then port enable). The
+ * register values are emulation magic taken from the HW team.
+ */
+static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt)
+{
+       u8 loopback = 0, port = p_hwfn->port_id * 2;
+
+       /* Fixed log-message typo: "Configurating" -> "Configuring" */
+       DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
+
+       if (ECORE_IS_AH(p_hwfn->p_dev)) {
+               ecore_emul_link_init_ah(p_hwfn, p_ptt);
+               return;
+       }
+
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
+                        port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
+                        0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
+                        0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
+                        0x30ffffc000ULL, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
+                        port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
+                        0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
+}
+
+/* Non-ASIC XMAC bring-up for a BB port: hard reset + soft reset of the
+ * XMAC block, warp-core port mode, then per-port (via port_offset) max RX
+ * size, TX CRC append and TX/RX enable.
+ */
+static void ecore_link_init(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt, u8 port)
+{
+       /* Port 1's XMAC register block sits 0x800 above port 0's */
+       int port_offset = port ? 0x800 : 0;
+       u32 xmac_rxctrl = 0;
+
+       /* Reset of XMAC */
+       /* FIXME: move to common start */
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+               MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
+       OSAL_MSLEEP(1);
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+               MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
+
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+       /* Set the number of ports on the Warp Core to 10G */
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+
+       /* Soft reset of XMAC */
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+                MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+       OSAL_MSLEEP(1);
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+                MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+
+       /* FIXME: move to common end */
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+               ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE + port_offset, 0x20);
+
+       /* Set max packet size for this port (0x2710 = 10000 bytes) */
+       ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE + port_offset, 0x2710);
+
+       /* Enable CRC append for Tx packets on this port */
+       ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO + port_offset, 0xC800);
+
+       /* Enable TX and RX on this port */
+       ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL + port_offset,
+                XMAC_REG_CTRL_TX_EN | XMAC_REG_CTRL_RX_EN);
+       xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset);
+       xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE;
+       ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset, xmac_rxctrl);
+}
+#endif
+#endif
+
+/* PHASE_PORT init: run the port-phase init tool; on non-ASIC platforms
+ * additionally bring up the link (FPGA BB via ecore_link_init, emulation
+ * via ecore_emul_link_init, with 100G/CMT OPTE activation when two
+ * hw-functions are present).
+ */
+static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
+                                              struct ecore_ptt *p_ptt,
+                                              int hw_mode)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* Init sequence */
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+                           hw_mode);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+#ifndef ASIC_ONLY
+       /* On real silicon the MFW handles the link; nothing more to do */
+       if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+               return ECORE_SUCCESS;
+
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               if (ECORE_IS_AH(p_hwfn->p_dev))
+                       return ECORE_SUCCESS;
+               ecore_link_init(p_hwfn, p_ptt, p_hwfn->port_id);
+       } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               if (p_hwfn->p_dev->num_hwfns > 1) {
+                       /* Activate OPTE in CMT */
+                       u32 val;
+
+                       val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
+                       val |= 0x10;
+                       ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
+                       ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
+                       ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
+                       ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
+                       /* Spread engine classification 50/50 between engines */
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
+                                0x55555555);
+               }
+
+               ecore_emul_link_init(p_hwfn, p_ptt);
+       } else {
+               DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
+       }
+#endif
+
+       return rc;
+}
+
+/* Partition the PF's doorbell BAR into the normal (per-ICID) region and
+ * the PWM region, validate the sizes, cache the PWM start offset in the
+ * hwfn and program the DORQ registers accordingly.
+ *
+ * Returns ECORE_SUCCESS, or ECORE_NORESOURCES when the BAR is too small
+ * for the required normal region or the minimal PWM region.
+ */
+static enum _ecore_status_t
+ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt)
+{
+       u32 pwm_regsize, norm_regsize;
+       u32 non_pwm_conn, min_addr_reg1;
+       u32 db_bar_size, n_cpus;
+       u32 pf_dems_shift;
+
+       /* In CMT (two hwfns) the BAR is split evenly between functions */
+       db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
+       if (p_hwfn->p_dev->num_hwfns > 1)
+               db_bar_size /= 2;
+
+       /* Calculate doorbell regions
+        * -----------------------------------
+        * The doorbell BAR is made of two regions. The first is called normal
+        * region and the second is called PWM region. In the normal region
+        * each ICID has its own set of addresses so that writing to that
+        * specific address identifies the ICID. In the Process Window Mode
+        * region the ICID is given in the data written to the doorbell. The
+        * above per PF register denotes the offset in the doorbell BAR in which
+        * the PWM region begins.
+        * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
+        * non-PWM connection. The calculation below computes the total non-PWM
+        * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
+        * in units of 4,096 bytes.
+        */
+       non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+           ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+                                         OSAL_NULL) +
+           ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
+       norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
+       min_addr_reg1 = norm_regsize / 4096;
+       pwm_regsize = db_bar_size - norm_regsize;
+
+       /* Check that the normal and PWM sizes are valid */
+       if (db_bar_size < norm_regsize) {
+               DP_ERR(p_hwfn->p_dev,
+                      "Doorbell BAR size 0x%x is too"
+                      " small (normal region is 0x%0x )\n",
+                      db_bar_size, norm_regsize);
+               return ECORE_NORESOURCES;
+       }
+       if (pwm_regsize < ECORE_MIN_PWM_REGION) {
+               DP_ERR(p_hwfn->p_dev,
+                      "PWM region size 0x%0x is too small."
+                      " Should be at least 0x%0x (Doorbell BAR size"
+                      " is 0x%x and normal region size is 0x%0x)\n",
+                      pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
+                      norm_regsize);
+               return ECORE_NORESOURCES;
+       }
+
+       /* Update hwfn */
+       p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to
+                                                 * calculate the doorbell
+                                                 * address
+                                                 */
+
+       /* Update registers */
+       /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+       pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+       DP_INFO(p_hwfn,
+               "Doorbell size 0x%x, Normal region 0x%x, PWM region 0x%x\n",
+               db_bar_size, norm_regsize, pwm_regsize);
+       /* NOTE(review): dpi_size/dpi_count are not set in this function -
+        * presumably initialized elsewhere before this is called; verify.
+        */
+       DP_INFO(p_hwfn, "DPI size 0x%x, DPI count 0x%x\n", p_hwfn->dpi_size,
+               p_hwfn->dpi_count);
+
+       return ECORE_SUCCESS;
+}
+
+/* Per-PF init phase: program runtime registers (CM/QM/NIG classification),
+ * clean up leftovers from a previous driver instance, run the PF and QM_PF
+ * init-tool phases, set up the doorbell BAR and - when b_hw_start is set -
+ * enable interrupts and send the function-start ramrod.
+ *
+ * @param p_hwfn               - hw function
+ * @param p_ptt                - ptt window used for writes by this function
+ * @param p_tunn               - tunnelling parameters for the start ramrod
+ * @param hw_mode              - MODE_* bitmask computed for this device
+ * @param b_hw_start           - whether to enable interrupts and start the PF
+ * @param int_mode             - interrupt mode to enable when starting
+ * @param allow_npar_tx_switch - passed through to the start ramrod
+ *
+ * @return ECORE_SUCCESS on success, error code otherwise.
+ */
+static enum _ecore_status_t
+ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
+                struct ecore_ptt *p_ptt,
+                struct ecore_tunn_start_params *p_tunn,
+                int hw_mode,
+                bool b_hw_start,
+                enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u8 rel_pf_id = p_hwfn->rel_pf_id;
+       u32 prs_reg;
+
+       /* ILT/DQ/CM/QM */
+       if (p_hwfn->mcp_info) {
+               struct ecore_mcp_function_info *p_info;
+
+               p_info = &p_hwfn->mcp_info->func_info;
+               if (p_info->bandwidth_min)
+                       p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+               /* Update rate limit once we'll actually have a link */
+               p_hwfn->qm_info.pf_rl = 100;
+       }
+       ecore_cxt_hw_init_pf(p_hwfn);
+
+       ecore_int_igu_init_rt(p_hwfn);  /* @@@TBD TODO MichalS multi hwfn ?? */
+
+       /* Set VLAN in NIG if needed */
+       if (hw_mode & (1 << MODE_MF_SD)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+                            p_hwfn->hw_info.ovlan);
+       }
+
+       /* Enable classification by MAC if needed */
+       if (hw_mode & (1 << MODE_MF_SI)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                          "Configuring TAGMAC_CLS_TYPE\n");
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET,
+                            1);
+       }
+
+       /* Protocol Configuration  - @@@TBD - should we set 0 otherwise? */
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+
+       /* perform debug configuration when chip is out of reset */
+       OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
+
+       /* Cleanup chip from previous driver if such remains exist */
+       rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
+       if (rc != ECORE_SUCCESS) {
+               ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
+               return rc;
+       }
+
+       /* PF Init sequence */
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+       if (rc)
+               return rc;
+
+       /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+       if (rc)
+               return rc;
+
+       /* Pure runtime initializations - directly to the HW  */
+       ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+       /* PCI relaxed ordering causes a decrease in the performance on some
+        * systems. Till a root cause is found, disable this attribute in the
+        * PCI config space.
+        */
+       /* Not in use @DPDK - locals 'pos'/'ctrl' would be needed again if
+        * this block is ever re-enabled.
+        * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
+        * if (!pos) {
+        *      DP_NOTICE(p_hwfn, true,
+        *                "Failed to find the PCI Express"
+        *                " Capability structure in the PCI config space\n");
+        *      return ECORE_IO;
+        * }
+        * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
+        *                           &ctrl);
+        * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+        * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
+        *                           &ctrl);
+        */
+
+       rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+       if (rc)
+               return rc;
+
+       if (b_hw_start) {
+               /* enable interrupts */
+               ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+               /* send function start command */
+               rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
+                                      allow_npar_tx_switch);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Function start ramrod failed\n");
+               } else {
+                       /* Dump parser state for debugging purposes */
+                       prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH register after start PFn\n");
+                       prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
+                       prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+                       prs_reg = ecore_rd(p_hwfn, p_ptt,
+                                          PRS_REG_SEARCH_TCP_FIRST_FRAG);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
+                                  prs_reg);
+                       prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+               }
+       }
+       return rc;
+}
+
+/* Enable/disable the PF as PCI master in PXP and poll until the HW latches
+ * the new value (up to ~1 second, in 50us steps).
+ */
+static enum _ecore_status_t
+ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
+                     struct ecore_ptt *p_ptt, u8 enable)
+{
+       u32 set_val = enable ? 1 : 0;
+       u32 attempts = 20000;
+       u32 val;
+
+       /* Change PF in PXP */
+       ecore_wr(p_hwfn, p_ptt,
+                PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+       /* wait until value is set - try for 1 second every 50us */
+       do {
+               val = ecore_rd(p_hwfn, p_ptt,
+                              PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+               if (val == set_val)
+                       return ECORE_SUCCESS;
+
+               OSAL_UDELAY(50);
+       } while (--attempts);
+
+       DP_NOTICE(p_hwfn, true,
+                 "PFID_ENABLE_MASTER wasn't changed after a second\n");
+       return ECORE_UNKNOWN_ERROR;
+}
+
+/* Re-read the MFW mailbox and refresh the driver's shadow copy of it */
+static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_ptt *p_main_ptt)
+{
+       struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+
+       /* Read shadow of current MFW mailbox */
+       ecore_mcp_read_mb(p_hwfn, p_main_ptt);
+       OSAL_MEMCPY(p_info->mfw_mb_shadow, p_info->mfw_mb_cur,
+                   p_info->mfw_mb_length);
+}
+
+/* Top-level HW init flow. For every hw-function: enable DMAE in PXP, send
+ * LOAD_REQ to the MFW, run the engine/port/function init phases according
+ * to the load code the MFW returned, and ACK with LOAD_DONE regardless of
+ * the outcome of the init phases.
+ *
+ * @param p_hwfn               - device
+ * @param p_tunn               - tunnelling parameters for the PF start
+ * @param b_hw_start           - whether to start the PF (interrupts + ramrod)
+ * @param int_mode             - interrupt mode to use when starting
+ * @param allow_npar_tx_switch - passed through to the PF start ramrod
+ * @param bin_fw_data          - binary fw data blob for the init tool
+ *
+ * @return ECORE_SUCCESS on success, error code otherwise.
+ */
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+                                  struct ecore_tunn_start_params *p_tunn,
+                                  bool b_hw_start,
+                                  enum ecore_int_mode int_mode,
+                                  bool allow_npar_tx_switch,
+                                  const u8 *bin_fw_data)
+{
+       enum _ecore_status_t rc, mfw_rc;
+       u32 load_code, param;
+       int i, j;
+
+       rc = ecore_init_fw_data(p_dev, bin_fw_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               /* Enable DMAE in PXP */
+               rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+
+               ecore_calc_hw_mode(p_hwfn);
+               /* @@@TBD need to add here:
+                * Check for fan failure
+                * Prev_unload
+                */
+               rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed sending LOAD_REQ command\n");
+                       return rc;
+               }
+
+               /* CQ75580:
+                * When coming back from hibernate state, the registers from
+                * which shadow is read initially are not initialized. It turns
+                * out that these registers get initialized during the call to
+                * ecore_mcp_load_req request. So we need to reread them here
+                * to get the proper shadow register value.
+                * Note: This is a workaround for the missing MFW
+                * initialization. It may be removed once the implementation
+                * is done.
+                */
+               ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "Load request was sent.Resp:0x%x, Load code: 0x%x\n",
+                          rc, load_code);
+
+               /* Only relevant for recovery:
+                * Clear the indication after the LOAD_REQ command is responded
+                * by the MFW.
+                */
+               p_dev->recov_in_prog = false;
+
+               /* Only the first PF loaded on the engine runs the common
+                * (engine-wide) init phase below.
+                */
+               p_hwfn->first_on_engine = (load_code ==
+                                          FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+               /* The load code determines how many init phases this PF
+                * must run; each case falls through to the next phase.
+                */
+               switch (load_code) {
+               case FW_MSG_CODE_DRV_LOAD_ENGINE:
+                       rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+                                                 p_hwfn->hw_info.hw_mode);
+                       if (rc)
+                               break;
+                       /* Fall through */
+               case FW_MSG_CODE_DRV_LOAD_PORT:
+                       rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+                                               p_hwfn->hw_info.hw_mode);
+                       if (rc)
+                               break;
+
+                       if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
+                               struct init_nig_pri_tc_map_req tc_map;
+
+                               OSAL_MEM_ZERO(&tc_map, sizeof(tc_map));
+
+                               /* remove this once flow control is
+                                * implemented
+                                */
+                               for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) {
+                                       tc_map.pri[j].tc_id = 0;
+                                       tc_map.pri[j].valid = 1;
+                               }
+                               ecore_init_nig_pri_tc_map(p_hwfn,
+                                                         p_hwfn->p_main_ptt,
+                                                         &tc_map);
+                       }
+                       /* fallthrough */
+               case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+                       rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+                                             p_tunn, p_hwfn->hw_info.hw_mode,
+                                             b_hw_start, int_mode,
+                                             allow_npar_tx_switch);
+                       break;
+               default:
+                       rc = ECORE_NOTIMPL;
+                       break;
+               }
+
+               if (rc != ECORE_SUCCESS)
+                       DP_NOTICE(p_hwfn, true,
+                                 "init phase failed loadcode 0x%x (rc %d)\n",
+                                 load_code, rc);
+
+               /* ACK mfw regardless of success or failure of initialization */
+               mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                      DRV_MSG_CODE_LOAD_DONE,
+                                      0, &load_code, &param);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+               if (mfw_rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed sending LOAD_DONE command\n");
+                       return mfw_rc;
+               }
+
+               p_hwfn->hw_init_done = true;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+#define ECORE_HW_STOP_RETRY_LIMIT      (10)
+/* Disable the PF's connection and task timers, then poll the TM scan
+ * registers until both linear scans report idle. Polling is skipped when a
+ * recovery flow is in progress; a notice is logged if the scans are still
+ * active after the retry limit.
+ */
+static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev,
+                                            struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt)
+{
+       int i;
+
+       /* close timers */
+       ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+       ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+       for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT &&
+                                       !p_dev->recov_in_prog; i++) {
+               if ((!ecore_rd(p_hwfn, p_ptt,
+                              TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+                   (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
+                       break;
+
+               /* Dependent on number of connection/tasks, possibly
+                * 1ms sleep is required between polls
+                */
+               OSAL_MSLEEP(1);
+       }
+       if (i == ECORE_HW_STOP_RETRY_LIMIT)
+               DP_NOTICE(p_hwfn, true,
+                         "Timers linear scans are not over"
+                         " [Connection %02x Tasks %02x]\n",
+                         (u8)ecore_rd(p_hwfn, p_ptt,
+                                      TM_REG_PF_SCAN_ACTIVE_CONN),
+                         (u8)ecore_rd(p_hwfn, p_ptt,
+                                      TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
+/* Stop the connection/task timers of every hw-function on the device */
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
+{
+       int i;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               ecore_hw_timers_stop(p_dev, p_hwfn, p_hwfn->p_main_ptt);
+       }
+}
+
+/* Close the device in FW and HW. For every hw-function: send the PF-stop
+ * ramrod, close the NIG->BRB gate and the parser, stop the timers and
+ * disable attention generation. A PF-stop failure is logged but the HW
+ * teardown continues, to prevent illegal host access by the device.
+ * Finally, DMAE is disabled in PXP on the first hw-function only.
+ *
+ * @return ECORE_SUCCESS, or the last error encountered.
+ */
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS, t_rc;
+       int j;
+
+       for_each_hwfn(p_dev, j) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+               struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
+
+               /* mark the hw as uninitialized... */
+               p_hwfn->hw_init_done = false;
+
+               rc = ecore_sp_pf_stop(p_hwfn);
+               if (rc)
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to close PF against FW. Continue to"
+                                 " stop HW to prevent illegal host access"
+                                 " by the device\n");
+
+               /* perform debug action after PF stop was sent */
+               OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
+
+               /* close NIG to BRB gate */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+               /* close parser */
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+               /* @@@TBD - clean transmission queues (5.b) */
+               /* @@@TBD - clean BTB (5.c) */
+
+               ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
+
+               /* @@@TBD - verify DMAE requests are done (8) */
+
+               /* Disable Attention Generation */
+               ecore_int_igu_disable_int(p_hwfn, p_ptt);
+               ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+               ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+               ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+               /* Need to wait 1ms to guarantee SBs are cleared */
+               OSAL_MSLEEP(1);
+       }
+
+       /* Disable DMAE in PXP - in CMT, this should only be done for
+        * first hw-function, and only after all transactions have
+        * stopped for all active hw-functions.
+        */
+       t_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],
+                                    p_dev->hwfns[0].p_main_ptt, false);
+       if (t_rc != ECORE_SUCCESS)
+               rc = t_rc;
+
+       return rc;
+}
+
+/* Shut down the fastpath of every hw-function without stopping the PF in
+ * FW: close the NIG->BRB gate and the parser and clear the status blocks.
+ * Traffic can be re-opened later with ecore_hw_start_fastpath().
+ */
+void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
+{
+       int j;
+
+       for_each_hwfn(p_dev, j) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+               struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+                          "Shutting down the fastpath\n");
+
+               /* close NIG to BRB gate */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+               /* close parser */
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+               /* @@@TBD - clean transmission queues (5.b) */
+               /* @@@TBD - clean BTB (5.c) */
+
+               /* @@@TBD - verify DMAE requests are done (8) */
+
+               ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+               /* Need to wait 1ms to guarantee SBs are cleared */
+               OSAL_MSLEEP(1);
+       }
+}
+
+/* Re-open the NIG->BRB gate so Rx traffic can flow to the PF again;
+ * counterpart of ecore_hw_stop_fastpath().
+ */
+void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+       /* Re-open incoming traffic */
+       ecore_wr(p_hwfn, p_ptt,
+                NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+/* Read @reg and verify it holds @expected; used as a sanity check before
+ * resetting the HW. Logs a notice and returns ECORE_UNKNOWN_ERROR on
+ * mismatch, ECORE_SUCCESS otherwise.
+ */
+static enum _ecore_status_t ecore_reg_assert(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt, u32 reg,
+                                            bool expected)
+{
+       u32 assert_val = ecore_rd(p_hwfn, p_ptt, reg);
+
+       if (assert_val != expected) {
+               DP_NOTICE(p_hwfn, true, "Value at address 0x%08x != 0x%08x\n",
+                         reg, expected);
+               return ECORE_UNKNOWN_ERROR;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Bring the device towards a HW reset. For every hw-function: sanity-check
+ * the QM usage counters, disable the PF in the DORQ/QM/CFC blocks, and run
+ * the UNLOAD_REQ/UNLOAD_DONE handshake with the MFW. The handshake is
+ * skipped entirely when a recovery flow is in progress.
+ *
+ * @return ECORE_SUCCESS, or the UNLOAD_DONE failure code.
+ */
+enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 unload_resp, unload_param;
+       int i;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n");
+
+               /* Check for incorrect states */
+               if (!p_dev->recov_in_prog) {
+                       ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+                                        QM_REG_USG_CNT_PF_TX, 0);
+                       ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+                                        QM_REG_USG_CNT_PF_OTHER, 0);
+                       /* @@@TBD - assert on incorrect xCFC values (10.b) */
+               }
+
+               /* Disable PF in HW blocks */
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                        TCFC_REG_STRONG_ENABLE_PF, 0);
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                        CCFC_REG_STRONG_ENABLE_PF, 0);
+
+               if (p_dev->recov_in_prog) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+                                  "Recovery is in progress -> skip "
+                                  "sending unload_req/done\n");
+                       break;
+               }
+
+               /* Send unload command to MCP */
+               rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                  DRV_MSG_CODE_UNLOAD_REQ,
+                                  DRV_MB_PARAM_UNLOAD_WOL_MCP,
+                                  &unload_resp, &unload_param);
+               if (rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "ecore_hw_reset: UNLOAD_REQ failed\n");
+                       /* @@TBD - what to do? for now, assume ENG. */
+                       unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+               }
+
+               rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                  DRV_MSG_CODE_UNLOAD_DONE,
+                                  0, &unload_resp, &unload_param);
+               if (rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn,
+                                 true, "ecore_hw_reset: UNLOAD_DONE failed\n");
+                       /* @@@TBD - Should it really ASSERT here ? */
+                       return rc;
+               }
+       }
+
+       return rc;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare:
+ * the PTT pool and the IGU info structure.
+ */
+static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
+{
+       ecore_ptt_pool_free(p_hwfn);
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access: clear the PGLUE_B indirect-access address windows,
+ * clear any error indication left by a previous driver instance, and
+ * enable internal target-reads for this PF.
+ */
+static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
+{
+       /* clear indirect access */
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+       /* Clean Previous errors if such exist */
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+
+       /* enable internal target-read */
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+}
+
+/* Read the PXP "ME" registers and derive the function's identity:
+ * opaque/concrete FIDs, absolute and relative PF ids and the port id.
+ */
+static void get_function_id(struct ecore_hwfn *p_hwfn)
+{
+       /* ME Register */
+       p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+                                                PXP_PF_ME_OPAQUE_ADDR);
+
+       p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+       /* Bits 16-19 from the ME registers are the pf_num */
+       /* @@ @TBD - check, may be wrong after B0 implementation for CMT */
+       p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+       p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+                                     PXP_CONCRETE_FID_PFID);
+       p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+                                   PXP_CONCRETE_FID_PORT);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+                  "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+                  p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
+}
+
+/* Derive the PF feature quotas from the resource quotas computed in
+ * ecore_hw_get_resc(); currently only the L2-queue feature is set.
+ */
+static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
+{
+       u32 *feat_num = p_hwfn->hw_info.feat_num;
+       int num_features = 1;
+       u32 sb_quota, l2_quota;
+
+       /* L2 Queues require each: 1 status block. 1 L2 queue */
+       sb_quota = RESC_NUM(p_hwfn, ECORE_SB) / num_features;
+       l2_quota = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+       feat_num[ECORE_PF_L2_QUE] = OSAL_MIN_T(u32, sb_quota, l2_quota);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+                  "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+                  feat_num[ECORE_PF_L2_QUE],
+                  RESC_NUM(p_hwfn, ECORE_SB), num_features);
+}
+
+/* @@@TBD MK RESC: This info is currently hard code and set as if we were MF
+ * need to read it from shmem...
+ */
+/* Split the engine-wide resources (SBs, L2 queues, vports, PQs, RLs, MAC and
+ * VLAN filters, ILT lines) evenly among the PFs on the engine, assign this
+ * PF's start offsets by its relative id, and derive the feature quotas.
+ *
+ * @return ECORE_SUCCESS, or ECORE_INVAL if the ILT range overflows.
+ */
+static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)
+{
+       u32 *resc_start = p_hwfn->hw_info.resc_start;
+       u8 num_funcs = p_hwfn->num_funcs_on_engine;
+       u32 *resc_num = p_hwfn->hw_info.resc_num;
+       int i, max_vf_vlan_filters;
+       struct ecore_sb_cnt_info sb_cnt_info;
+       bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+
+       OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+
+       /* VF vlan filters are not accounted for yet */
+       max_vf_vlan_filters = 0;
+
+       ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+       resc_num[ECORE_SB] = OSAL_MIN_T(u32,
+                                       (MAX_SB_PER_PATH_BB / num_funcs),
+                                       sb_cnt_info.sb_cnt);
+
+       /* Per-chip maxima (K2/AH vs BB) divided evenly among the PFs */
+       resc_num[ECORE_L2_QUEUE] = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+                                   MAX_NUM_L2_QUEUES_BB) / num_funcs;
+       resc_num[ECORE_VPORT] = (b_ah ? MAX_NUM_VPORTS_K2 :
+                                MAX_NUM_VPORTS_BB) / num_funcs;
+       resc_num[ECORE_RSS_ENG] = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+                                  ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+       resc_num[ECORE_PQ] = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+                             MAX_QM_TX_QUEUES_BB) / num_funcs;
+       resc_num[ECORE_RL] = 8;
+       resc_num[ECORE_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+       resc_num[ECORE_VLAN] = (ETH_NUM_VLAN_FILTERS -
+                               max_vf_vlan_filters +
+                               1 /*For vlan0 */) / num_funcs;
+
+       /* TODO - there will be a problem in AH - there are only 11k lines */
+       resc_num[ECORE_ILT] = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+                              PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               /* Reduced build contains less PQs */
+               if (!(p_hwfn->p_dev->b_is_emul_full))
+                       resc_num[ECORE_PQ] = 32;
+
+               /* For AH emulation, since we have a possible maximal number of
+                * 16 enabled PFs, in case there are not enough ILT lines -
+                * allocate only first PF as RoCE and have all the other ETH
+                * only with less ILT lines.
+                */
+               /* NOTE(review): self-assignment below is a no-op, so the
+                * PF0 ILT adjustment described above is effectively
+                * unimplemented - confirm the intended value.
+                */
+               if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
+                       resc_num[ECORE_ILT] = resc_num[ECORE_ILT];
+       }
+#endif
+
+       /* Each PF's slice starts where the previous PF's slice ends */
+       for (i = 0; i < ECORE_MAX_RESC; i++)
+               resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+
+#ifndef ASIC_ONLY
+       /* Correct the common ILT calculation if PF0 has more */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
+           p_hwfn->p_dev->b_is_emul_full &&
+           p_hwfn->rel_pf_id && resc_num[ECORE_ILT])
+               resc_start[ECORE_ILT] += resc_num[ECORE_ILT];
+#endif
+
+       /* Sanity for ILT */
+       if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+           (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
+               DP_NOTICE(p_hwfn, true,
+                         "Can't assign ILT pages [%08x,...,%08x]\n",
+                         RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,
+                                                                 ECORE_ILT) -
+                         1);
+               return ECORE_INVAL;
+       }
+
+       ecore_hw_set_feat(p_hwfn);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+                  "The numbers for each resource are:\n"
+                  "SB = %d start = %d\n"
+                  "L2_QUEUE = %d start = %d\n"
+                  "VPORT = %d start = %d\n"
+                  "PQ = %d start = %d\n"
+                  "RL = %d start = %d\n"
+                  "MAC = %d start = %d\n"
+                  "VLAN = %d start = %d\n"
+                  "ILT = %d start = %d\n"
+                  "CMDQS_CQS = %d start = %d\n",
+                  RESC_NUM(p_hwfn, ECORE_SB), RESC_START(p_hwfn, ECORE_SB),
+                  RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
+                  RESC_START(p_hwfn, ECORE_L2_QUEUE),
+                  RESC_NUM(p_hwfn, ECORE_VPORT),
+                  RESC_START(p_hwfn, ECORE_VPORT),
+                  RESC_NUM(p_hwfn, ECORE_PQ), RESC_START(p_hwfn, ECORE_PQ),
+                  RESC_NUM(p_hwfn, ECORE_RL), RESC_START(p_hwfn, ECORE_RL),
+                  RESC_NUM(p_hwfn, ECORE_MAC), RESC_START(p_hwfn, ECORE_MAC),
+                  RESC_NUM(p_hwfn, ECORE_VLAN),
+                  RESC_START(p_hwfn, ECORE_VLAN),
+                  RESC_NUM(p_hwfn, ECORE_ILT), RESC_START(p_hwfn, ECORE_ILT),
+                  RESC_NUM(p_hwfn, ECORE_CMDQS_CQS),
+                  RESC_START(p_hwfn, ECORE_CMDQS_CQS));
+
+       return ECORE_SUCCESS;
+}
+
+/* Read the default port, link and multi-function configuration from the
+ * nvm_cfg image in MCP shared memory and cache it in the hwfn / mcp_info
+ * structures.  Returns ECORE_INVAL when the MCP has not yet initialized
+ * shared memory; otherwise the result of ecore_mcp_fill_shmem_func_info().
+ */
+static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt)
+{
+       u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+       u32 port_cfg_addr, link_temp, device_capabilities;
+       struct ecore_mcp_link_params *link;
+
+       /* Read global nvm_cfg address */
+       u32 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+       /* Verify MCP has initialized it */
+       if (nvm_cfg_addr == 0) {
+               DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
+               return ECORE_INVAL;
+       }
+
+       /* Read nvm_cfg1 (notice this is just an offset, not an offsize (TBD)) */
+       nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+       /* Locate the global core_cfg dword inside nvm_cfg1 */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+           OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob,
+                                                      core_cfg);
+
+       core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
+
+       /* Translate the nvm network-port-mode field into the ecore enum */
+       switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+               NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
+               break;
+       default:
+               /* Non-fatal: leave port_mode untouched and continue */
+               DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
+                         core_cfg);
+               break;
+       }
+
+       /* Read default link configuration */
+       link = &p_hwfn->mcp_info->link_input;
+       port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+           OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+       link_temp = ecore_rd(p_hwfn, p_ptt,
+                            port_cfg_addr +
+                            OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
+       link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+       link->speed.advertised_speeds = link_temp;
+
+       /* The speed capabilities mirror the advertised speeds */
+       link_temp = link->speed.advertised_speeds;
+       p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
+
+       link_temp = ecore_rd(p_hwfn, p_ptt,
+                            port_cfg_addr +
+                            OFFSETOF(struct nvm_cfg1_port, link_settings));
+       switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+               NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+               link->speed.autoneg = true;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+               link->speed.forced_speed = 1000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+               link->speed.forced_speed = 10000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+               link->speed.forced_speed = 25000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+               link->speed.forced_speed = 40000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+               link->speed.forced_speed = 50000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+               link->speed.forced_speed = 100000;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
+       }
+
+       /* Flow-control bits are extracted from the same link_settings dword */
+       link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+       link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+       link->pause.autoneg = !!(link_temp &
+                                 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+       link->pause.forced_rx = !!(link_temp &
+                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+       link->pause.forced_tx = !!(link_temp &
+                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+       link->loopback_mode = 0;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                  "Read default link: Speed 0x%08x, Adv. Speed 0x%08x,"
+                  " AN: 0x%02x, PAUSE AN: 0x%02x\n",
+                  link->speed.forced_speed, link->speed.advertised_speeds,
+                  link->speed.autoneg, link->pause.autoneg);
+
+       /* Read Multi-function information from shmem */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+           OFFSETOF(struct nvm_cfg1, glob) +
+           OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+
+       generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
+
+       mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+           NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+       switch (mf_mode) {
+       case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+               p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+               p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+               p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
+               break;
+       }
+       DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+               p_hwfn->p_dev->mf_mode);
+
+       /* Read device capabilities from shmem */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+           OFFSETOF(struct nvm_cfg1, glob) +
+           OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
+
+       device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
+       if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+               OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
+                            &p_hwfn->hw_info.device_capabilities);
+
+       return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+/* Learn how many PFs are enabled on this hwfn's engine from the
+ * MISCS_REG_FUNCTION_HIDE register and cache the result in
+ * p_hwfn->num_funcs_on_engine.
+ */
+static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt)
+{
+       u8 num_funcs;
+       u32 tmp, mask;
+
+       /* Default: assume the chip's full PF count (K2 vs BB) */
+       num_funcs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_PFS_K2
+           : MAX_NUM_PFS_BB;
+
+       /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+        * in the other bits are selected.
+        * Bits 1-15 are for functions 1-15, respectively, and their value is
+        * '0' only for enabled functions (function 0 always exists and
+        * enabled).
+        * In case of CMT, only the "even" functions are enabled, and thus the
+        * number of functions for both hwfns is learnt from the same bits.
+        */
+
+       tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+       if (tmp & 0x1) {
+               /* Pick the mask of relevant function bits for this path */
+               if (ECORE_PATH_ID(p_hwfn) && p_hwfn->p_dev->num_hwfns == 1) {
+                       num_funcs = 0;
+                       mask = 0xaaaa;
+               } else {
+                       num_funcs = 1;
+                       mask = 0x5554;
+               }
+
+               /* Count zero ("enabled") bits by inverting and popcounting */
+               tmp = (tmp ^ 0xffffffff) & mask;
+               while (tmp) {
+                       if (tmp & 0x1)
+                               num_funcs++;
+                       tmp >>= 0x1;
+               }
+       }
+
+       p_hwfn->num_funcs_on_engine = num_funcs;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false,
+                         "FPGA: Limit number of PFs to 4 [would affect"
+                         " resource allocation, needed for IOV]\n");
+               p_hwfn->num_funcs_on_engine = 4;
+       }
+#endif
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "num_funcs_on_engine = %d\n",
+                  p_hwfn->num_funcs_on_engine);
+}
+
+/* Derive the number of ports on a BB engine from the network port mode.
+ * Modes below 3 map to one port, modes 3-5 to two ports; any other value
+ * is unexpected and falls back to a single port.
+ */
+static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt)
+{
+       u32 port_mode;
+
+#ifndef ASIC_ONLY
+       /* Read the port mode */
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+               port_mode = 4;
+       else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
+                (p_hwfn->p_dev->num_hwfns > 1))
+               /* In CMT on emulation, assume 1 port */
+               port_mode = 1;
+       else
+#endif
+               port_mode = ecore_rd(p_hwfn, p_ptt,
+                                    CNIG_REG_NW_PORT_MODE_BB_B0);
+
+       if (port_mode < 3) {
+               p_hwfn->p_dev->num_ports_in_engines = 1;
+       } else if (port_mode <= 5) {
+               p_hwfn->p_dev->num_ports_in_engines = 2;
+       } else {
+               /* Log the offending mode itself; previously this printed
+                * num_ports_in_engines, which is stale at this point.
+                */
+               DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
+                         port_mode);
+
+               /* Default num_ports_in_engines to something */
+               p_hwfn->p_dev->num_ports_in_engines = 1;
+       }
+}
+
+/* Count engine ports on an AH (K2) device: a port is counted when bit 0
+ * of its CNIG port-configuration register is set.
+ */
+static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt)
+{
+       u32 port_conf;
+       int port_idx;
+
+       p_hwfn->p_dev->num_ports_in_engines = 0;
+
+       for (port_idx = 0; port_idx < MAX_NUM_PORTS_K2; port_idx++) {
+               port_conf = ecore_rd(p_hwfn, p_ptt,
+                                    CNIG_REG_NIG_PORT0_CONF_K2 +
+                                    (port_idx * 4));
+               if (port_conf & 0x1)
+                       p_hwfn->p_dev->num_ports_in_engines++;
+       }
+}
+
+/* Dispatch port counting to the chip-specific routine (BB vs AH) */
+static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt)
+{
+       if (!ECORE_IS_BB(p_hwfn->p_dev)) {
+               ecore_hw_info_port_num_ah(p_hwfn, p_ptt);
+               return;
+       }
+
+       ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
+}
+
+/* Gather the HW/SHMEM-derived information for a hwfn: port count, nvm
+ * configuration, IGU CAM, MAC address, ovlan, personality and the number
+ * of functions on the engine; finally resolve per-function resources.
+ */
+static enum _ecore_status_t
+ecore_get_hw_info(struct ecore_hwfn *p_hwfn,
+                 struct ecore_ptt *p_ptt,
+                 enum ecore_pci_personality personality)
+{
+       enum _ecore_status_t rc;
+
+       /* TODO In get_hw_info, amongst others:
+        * Get MCP FW revision and determine according to it the supported
+        * features (e.g. DCB)
+        * Get boot mode
+        * ecore_get_pcie_width_speed, WOL capability.
+        * Number of global CQ-s (for storage)
+        */
+       ecore_hw_info_port_num(p_hwfn, p_ptt);
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+#endif
+               /* NOTE(review): the return value is ignored; a failure to
+                * read nvm info is currently not treated as fatal here.
+                */
+               ecore_hw_get_nvm_info(p_hwfn, p_ptt);
+
+       rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
+       if (rc)
+               return rc;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
+#endif
+               /* Take the MAC the management FW reported for this function */
+               OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
+                           p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
+#ifndef ASIC_ONLY
+       } else {
+               /* Non-ASIC: fabricate a MAC, unique per PF via the last byte */
+               static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 };
+
+               OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
+               p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
+       }
+#endif
+
+       if (ecore_mcp_is_init(p_hwfn)) {
+               if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
+                       p_hwfn->hw_info.ovlan =
+                           p_hwfn->mcp_info->func_info.ovlan;
+
+               ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+       }
+
+       /* Caller-supplied personality wins; otherwise take it from the MCP */
+       if (personality != ECORE_PCI_DEFAULT)
+               p_hwfn->hw_info.personality = personality;
+       else if (ecore_mcp_is_init(p_hwfn))
+               p_hwfn->hw_info.personality =
+                   p_hwfn->mcp_info->func_info.protocol;
+
+#ifndef ASIC_ONLY
+       /* To overcome ILT lack for emulation, at least until we'll have
+        * a definite answer from system about it, allow only PF0 to be RoCE.
+        */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
+               p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+#endif
+
+       ecore_get_num_funcs(p_hwfn, p_ptt);
+
+       /* Feat num is dependent on personality and on the number of functions
+        * on the engine. Therefore it should be come after personality
+        * initialization and after getting the number of functions.
+        */
+       return ecore_hw_get_resc(p_hwfn);
+}
+
+/* @TMP - this should move to a proper .h */
+#define CHIP_NUM_AH                    0x8070
+
+/* Probe device-wide identification: vendor/device ids, chip number,
+ * revision, bond id and metal, device type (BB vs AH), and whether the
+ * device runs in CMT mode (two hw-functions).  Called once, by the first
+ * hwfn.  Returns ECORE_ABORTED for the unsupported BB A0 chip.
+ */
+static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       u32 tmp;
+
+       /* Read Vendor Id / Device Id */
+       OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
+                                 &p_dev->vendor_id);
+       OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
+                                 &p_dev->device_id);
+
+       /* The u16 casts keep only the low 16 bits of each register */
+       p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                        MISCS_REG_CHIP_NUM);
+       p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                       MISCS_REG_CHIP_REV);
+
+       MASK_FIELD(CHIP_REV, p_dev->chip_rev);
+
+       /* Determine type */
+       if (p_dev->device_id == CHIP_NUM_AH)
+               p_dev->type = ECORE_DEV_TYPE_AH;
+       else
+               p_dev->type = ECORE_DEV_TYPE_BB;
+
+       /* Learn number of HW-functions */
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                      MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+       if (tmp & (1 << p_hwfn->rel_pf_id)) {
+               DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
+               p_dev->num_hwfns = 2;
+       } else {
+               p_dev->num_hwfns = 1;
+       }
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_dev)) {
+               /* For some reason we have problems with this register
+                * in B0 emulation; Simply assume no CMT
+                */
+               DP_NOTICE(p_dev->hwfns, false,
+                         "device on emul - assume no CMT\n");
+               p_dev->num_hwfns = 1;
+       }
+#endif
+
+       p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                      MISCS_REG_CHIP_TEST_REG) >> 4;
+       MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
+       p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                         MISCS_REG_CHIP_METAL);
+       MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
+       DP_INFO(p_dev->hwfns,
+               "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x"
+               " Metal: %04x\n",
+               ECORE_IS_BB(p_dev) ? "BB" : "AH",
+               CHIP_REV_IS_A0(p_dev) ? 0 : 1,
+               p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
+               p_dev->chip_metal);
+
+       if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
+               DP_NOTICE(p_dev->hwfns, false,
+                         "The chip type/rev (BB A0) is not supported!\n");
+               return ECORE_ABORTED;
+       }
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                        MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
+
+       if (CHIP_REV_IS_EMUL(p_dev)) {
+               /* Bit 29 of this scratch register distinguishes a FULL from
+                * a REDUCED emulation build.
+                */
+               tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                              MISCS_REG_ECO_RESERVED);
+               if (tmp & (1 << 29)) {
+                       DP_NOTICE(p_hwfn, false,
+                                 "Emulation: Running on a FULL build\n");
+                       p_dev->b_is_emul_full = true;
+               } else {
+                       DP_NOTICE(p_hwfn, false,
+                                 "Emulation: Running on a REDUCED build\n");
+               }
+       }
+#endif
+
+       return ECORE_SUCCESS;
+}
+
+/* Flag every hwfn as uninitialized so a later resume re-runs the full
+ * hw/fw initialization sequence.
+ */
+void ecore_prepare_hibernate(struct ecore_dev *p_dev)
+{
+       int i;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_curr = &p_dev->hwfns[i];
+
+               DP_VERBOSE(p_curr, ECORE_MSG_IFDOWN,
+                          "Mark hw/fw uninitialized\n");
+
+               p_curr->hw_init_done = false;
+               p_curr->first_on_engine = false;
+       }
+}
+
+/* Prepare a single hw-function: map its register/doorbell views, verify
+ * chip access, allocate the PTT pool and main PTT, learn device-wide info
+ * (first hwfn only), initialize the MCP, read the HW/SHMEM configuration
+ * and allocate the init RT array.  On failure, resources acquired so far
+ * are unwound via the err labels.
+ */
+static enum _ecore_status_t
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
+                       void OSAL_IOMEM *p_regview,
+                       void OSAL_IOMEM *p_doorbells,
+                       enum ecore_pci_personality personality)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* Split PCI bars evenly between hwfns */
+       p_hwfn->regview = p_regview;
+       p_hwfn->doorbells = p_doorbells;
+
+       /* Validate that chip access is feasible */
+       if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+               DP_ERR(p_hwfn,
+                      "Reading the ME register returns all Fs;"
+                      " Preventing further chip access\n");
+               return ECORE_INVAL;
+       }
+
+       get_function_id(p_hwfn);
+
+       /* Allocate PTT pool */
+       rc = ecore_ptt_pool_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+               goto err0;
+       }
+
+       /* Allocate the main PTT */
+       p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+       /* First hwfn learns basic information, e.g., number of hwfns */
+       if (!p_hwfn->my_id) {
+               rc = ecore_get_dev_info(p_hwfn->p_dev);
+               if (rc != ECORE_SUCCESS)
+                       goto err1;
+       }
+
+       ecore_hw_hwfn_prepare(p_hwfn);
+
+       /* Initialize MCP structure */
+       rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+               goto err1;
+       }
+
+       /* Read the device configuration information from the HW and SHMEM */
+       rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
+               goto err2;
+       }
+
+       /* Allocate the init RT array and initialize the init-ops engine */
+       rc = ecore_init_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+               goto err2;
+       }
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false,
+                         "FPGA: workaround; Prevent DMAE parities\n");
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7);
+
+               DP_NOTICE(p_hwfn, false,
+                         "FPGA: workaround: Set VF bar0 size\n");
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                        PGLUE_B_REG_VF_BAR0_SIZE, 4);
+       }
+#endif
+
+       return rc;
+       /* Unwind in the reverse order of acquisition */
+err2:
+       ecore_mcp_free(p_hwfn);
+err1:
+       ecore_hw_hwfn_free(p_hwfn);
+err0:
+       return rc;
+}
+
+/* Prepare the device: always prepares the leading hwfn, and in CMT mode
+ * (learnt while preparing the first) also the second hwfn, which uses the
+ * upper half of each PCI bar.
+ */
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       enum _ecore_status_t rc;
+
+       /* Store the precompiled init data ptrs */
+       ecore_init_iro_array(p_dev);
+
+       /* Initialize the first hwfn - will learn number of hwfns */
+       rc = ecore_hw_prepare_single(p_hwfn,
+                                    p_dev->regview,
+                                    p_dev->doorbells, personality);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* The second hwfn inherits the personality resolved for the first */
+       personality = p_hwfn->hw_info.personality;
+
+       /* initialize 2nd hwfn if necessary */
+       if (p_dev->num_hwfns > 1) {
+               void OSAL_IOMEM *p_regview, *p_doorbell;
+               u8 OSAL_IOMEM *addr;
+
+               /* adjust bar offset for second engine */
+               addr = (u8 OSAL_IOMEM *)p_dev->regview +
+                   ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+               p_regview = (void OSAL_IOMEM *)addr;
+
+               addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
+                   ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+               p_doorbell = (void OSAL_IOMEM *)addr;
+
+               /* prepare second hw function */
+               rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
+                                            p_doorbell, personality);
+
+               /* in case of error, need to free the previously
+                * initialized hwfn 0
+                */
+               if (rc != ECORE_SUCCESS) {
+                       ecore_init_free(p_hwfn);
+                       ecore_mcp_free(p_hwfn);
+                       ecore_hw_hwfn_free(p_hwfn);
+                       return rc;
+               }
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Release the per-hwfn resources acquired during device preparation */
+void ecore_hw_remove(struct ecore_dev *p_dev)
+{
+       int j;
+
+       for_each_hwfn(p_dev, j) {
+               struct ecore_hwfn *p_curr = &p_dev->hwfns[j];
+
+               ecore_init_free(p_curr);
+               ecore_hw_hwfn_free(p_curr);
+               ecore_mcp_free(p_curr);
+
+               OSAL_MUTEX_DEALLOC(&p_curr->dmae_info.mutex);
+       }
+}
+
+/* Free a next-ptr chain, page by page.  Each page's successor addresses
+ * live in the ecore_chain_next element placed after the usable elements,
+ * so they must be captured before the page itself is freed.
+ */
+static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
+                                     struct ecore_chain *p_chain)
+{
+       void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
+       dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
+       struct ecore_chain_next *p_next;
+       u32 size, i;
+
+       if (!p_virt)
+               return;
+
+       /* Byte offset of the next-ptr element within each page */
+       size = p_chain->elem_size * p_chain->usable_per_page;
+
+       /* Bounded by page_cnt - the last page links back to the first */
+       for (i = 0; i < p_chain->page_cnt; i++) {
+               if (!p_virt)
+                       break;
+
+               /* Capture the successor before releasing the current page */
+               p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
+               p_virt_next = p_next->next_virt;
+               p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+               OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
+                                      ECORE_CHAIN_PAGE_SIZE);
+
+               p_virt = p_virt_next;
+               p_phys = p_phys_next;
+       }
+}
+
+/* Release the one DMA page backing a single-page chain, if any */
+static void ecore_chain_free_single(struct ecore_dev *p_dev,
+                                   struct ecore_chain *p_chain)
+{
+       void *p_virt = p_chain->p_virt_addr;
+
+       if (p_virt == OSAL_NULL)
+               return;
+
+       OSAL_DMA_FREE_COHERENT(p_dev, p_virt,
+                              p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
+}
+
+/* Free a PBL-mode chain: every page listed in the virtual-address table,
+ * then the PBL table itself, and finally the table allocation.  Safe on a
+ * partially-constructed chain - it stops at the first page that was never
+ * allocated.
+ */
+static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
+                                struct ecore_chain *p_chain)
+{
+       void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+       u8 *p_pbl_virt = (u8 *)p_chain->pbl.p_virt_table;
+       u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+
+       if (!pp_virt_addr_tbl)
+               return;
+
+       /* Without a PBL table there are no pages to walk; just free the
+        * virtual-address table below.
+        */
+       if (!p_chain->pbl.p_virt_table)
+               goto out;
+
+       for (i = 0; i < page_cnt; i++) {
+               if (!pp_virt_addr_tbl[i])
+                       break;
+
+               /* The PBL entry holds the page's DMA address */
+               OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
+                                      *(dma_addr_t *)p_pbl_virt,
+                                      ECORE_CHAIN_PAGE_SIZE);
+
+               p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+       }
+
+       pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+       OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
+                              p_chain->pbl.p_phys_table, pbl_size);
+out:
+       OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
+}
+
+/* Dispatch to the mode-specific chain release routine */
+void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+       if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+               ecore_chain_free_next_ptr(p_dev, p_chain);
+       else if (p_chain->mode == ECORE_CHAIN_MODE_SINGLE)
+               ecore_chain_free_single(p_dev, p_chain);
+       else if (p_chain->mode == ECORE_CHAIN_MODE_PBL)
+               ecore_chain_free_pbl(p_dev, p_chain);
+}
+
+/* Verify that the requested chain, once rounded up to whole pages, still
+ * fits the chosen counter width (u16 or u32).  Returns ECORE_INVAL when
+ * the chain would be too large.
+ */
+static enum _ecore_status_t
+ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
+                              enum ecore_chain_cnt_type cnt_type,
+                              osal_size_t elem_size, u32 page_cnt)
+{
+       u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+       /* The actual chain size can be larger than the maximal possible value
+        * after rounding up the requested elements number to pages, and after
+        * taking into account the unusable elements (next-ptr elements).
+        * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+        * size/capacity fields are of a u32 type.
+        */
+       if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
+            chain_size > ((u32)ECORE_U16_MAX + 1)) ||
+           (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
+            chain_size > ECORE_U32_MAX)) {
+               DP_NOTICE(p_dev, true,
+                         "The actual chain size (0x%lx) is larger than"
+                         " the maximal possible value\n",
+                         (unsigned long)chain_size);
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate a next-ptr chain page by page, linking each new page into the
+ * previous page's next-ptr element; the last page is linked back to the
+ * first so the chain forms a ring.  On failure the caller releases any
+ * pages already linked via ecore_chain_free().
+ */
+static enum _ecore_status_t
+ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+       void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
+       dma_addr_t p_phys = 0;
+       u32 i;
+
+       for (i = 0; i < p_chain->page_cnt; i++) {
+               p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+                                                ECORE_CHAIN_PAGE_SIZE);
+               if (!p_virt) {
+                       DP_NOTICE(p_dev, true,
+                                 "Failed to allocate chain memory\n");
+                       return ECORE_NOMEM;
+               }
+
+               if (i == 0) {
+                       /* The first page anchors the chain */
+                       ecore_chain_init_mem(p_chain, p_virt, p_phys);
+                       ecore_chain_reset(p_chain);
+               } else {
+                       ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+                                                      p_virt, p_phys);
+               }
+
+               p_virt_prev = p_virt;
+       }
+       /* Last page's next element should point to the beginning of the
+        * chain.
+        */
+       ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+                                      p_chain->p_virt_addr,
+                                      p_chain->p_phys_addr);
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate the single DMA page of a SINGLE-mode chain and reset it */
+static enum _ecore_status_t
+ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+       dma_addr_t phys = 0;
+       void *virt;
+
+       virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &phys, ECORE_CHAIN_PAGE_SIZE);
+       if (virt == OSAL_NULL) {
+               DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
+               return ECORE_NOMEM;
+       }
+
+       ecore_chain_init_mem(p_chain, virt, phys);
+       ecore_chain_reset(p_chain);
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate a PBL-mode chain: a virtual-address table, a physically
+ * contiguous PBL table, and one DMA page per chain page.  Once structures
+ * are published into p_chain, failures are cleaned up by the caller
+ * (ecore_chain_alloc) via ecore_chain_free().
+ */
+static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
+                                                 struct ecore_chain *p_chain)
+{
+       void *p_virt = OSAL_NULL;
+       u8 *p_pbl_virt = OSAL_NULL;
+       void **pp_virt_addr_tbl = OSAL_NULL;
+       dma_addr_t p_phys = 0, p_pbl_phys = 0;
+       u32 page_cnt = p_chain->page_cnt, size, i;
+
+       size = page_cnt * sizeof(*pp_virt_addr_tbl);
+       pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size);
+       if (!pp_virt_addr_tbl) {
+               DP_NOTICE(p_dev, true,
+                         "Failed to allocate memory for the chain"
+                         " virtual addresses table\n");
+               return ECORE_NOMEM;
+       }
+       OSAL_MEM_ZERO(pp_virt_addr_tbl, size);
+
+       /* The allocation of the PBL table is done with its full size, since it
+        * is expected to be successive.
+        */
+       size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+       p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+       if (!p_pbl_virt) {
+               /* The table was never published into p_chain, so free it
+                * here to avoid a leak - ecore_chain_free_pbl() cannot
+                * reach it.
+                */
+               OSAL_VFREE(p_dev, pp_virt_addr_tbl);
+               DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
+               return ECORE_NOMEM;
+       }
+
+       ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+                                pp_virt_addr_tbl);
+
+       for (i = 0; i < page_cnt; i++) {
+               p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+                                                ECORE_CHAIN_PAGE_SIZE);
+               if (!p_virt) {
+                       /* Partially-filled table; caller frees via
+                        * ecore_chain_free_pbl(), which stops at the first
+                        * empty slot.
+                        */
+                       DP_NOTICE(p_dev, true,
+                                 "Failed to allocate chain memory\n");
+                       return ECORE_NOMEM;
+               }
+
+               if (i == 0) {
+                       ecore_chain_init_mem(p_chain, p_virt, p_phys);
+                       ecore_chain_reset(p_chain);
+               }
+
+               /* Fill the PBL table with the physical address of the page */
+               *(dma_addr_t *)p_pbl_virt = p_phys;
+               /* Keep the virtual address of the page */
+               p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+
+               p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate a chain according to its mode.  Validates that the resulting
+ * element count fits the requested counter width before allocating, and
+ * frees any partially-allocated pages on failure.
+ */
+enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
+                                      enum ecore_chain_use_mode intended_use,
+                                      enum ecore_chain_mode mode,
+                                      enum ecore_chain_cnt_type cnt_type,
+                                      u32 num_elems, osal_size_t elem_size,
+                                      struct ecore_chain *p_chain)
+{
+       u32 page_cnt;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       if (mode == ECORE_CHAIN_MODE_SINGLE)
+               page_cnt = 1;
+       else
+               page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+       rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
+                                           page_cnt);
+       if (rc) {
+               DP_NOTICE(p_dev, true,
+                         "Cannot allocate a chain with the given arguments:\n"
+                         " [use_mode %d, mode %d, cnt_type %d, num_elems %d,"
+                         " elem_size %zu]\n",
+                         intended_use, mode, cnt_type, num_elems, elem_size);
+               return rc;
+       }
+
+       /* Set up the chain's bookkeeping before allocating its pages */
+       ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
+                               mode, cnt_type);
+
+       switch (mode) {
+       case ECORE_CHAIN_MODE_NEXT_PTR:
+               rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
+               break;
+       case ECORE_CHAIN_MODE_SINGLE:
+               rc = ecore_chain_alloc_single(p_dev, p_chain);
+               break;
+       case ECORE_CHAIN_MODE_PBL:
+               rc = ecore_chain_alloc_pbl(p_dev, p_chain);
+               break;
+       }
+       if (rc)
+               goto nomem;
+
+       return ECORE_SUCCESS;
+
+nomem:
+       /* Release whatever the mode-specific allocator managed to acquire */
+       ecore_chain_free(p_dev, p_chain);
+       return rc;
+}
+
+/* Translate a relative l2-queue id into an absolute one; rejects ids
+ * beyond this function's allocation.
+ */
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+                                      u16 src_id, u16 *dst_id)
+{
+       u16 first, past_last;
+
+       if (src_id < RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+               *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
+               return ECORE_SUCCESS;
+       }
+
+       first = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
+       past_last = first + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+       DP_NOTICE(p_hwfn, true,
+                 "l2_queue id [%d] is not valid, available"
+                 " indices [%d - %d]\n",
+                 src_id, first, past_last);
+
+       return ECORE_INVAL;
+}
+
+/* Translate a relative vport id into an absolute one; rejects ids beyond
+ * this function's allocation.
+ */
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+                                   u8 src_id, u8 *dst_id)
+{
+       u8 first, past_last;
+
+       if (src_id < RESC_NUM(p_hwfn, ECORE_VPORT)) {
+               *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
+               return ECORE_SUCCESS;
+       }
+
+       first = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+       past_last = first + RESC_NUM(p_hwfn, ECORE_VPORT);
+       DP_NOTICE(p_hwfn, true,
+                 "vport id [%d] is not valid, available"
+                 " indices [%d - %d]\n",
+                 src_id, first, past_last);
+
+       return ECORE_INVAL;
+}
+
+/* Translate a relative RSS-engine id into an absolute one; rejects ids
+ * beyond this function's allocation.
+ */
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+                                     u8 src_id, u8 *dst_id)
+{
+       u8 first, past_last;
+
+       if (src_id < RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
+               *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
+               return ECORE_SUCCESS;
+       }
+
+       first = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
+       past_last = first + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
+       DP_NOTICE(p_hwfn, true,
+                 "rss_eng id [%d] is not valid,avail idx [%d - %d]\n",
+                 src_id, first, past_last);
+
+       return ECORE_INVAL;
+}
+
+/* Install p_filter (a 6-byte MAC address) into a free LLH function
+ * filter entry so traffic to that MAC is steered to this function.
+ * Returns ECORE_INVAL when all filter entries are in use.
+ */
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u8 *p_filter)
+{
+       u32 high, low, en;
+       int i;
+
+       /* LLH filtering is only relevant in these multi-function modes */
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return ECORE_SUCCESS;
+
+       /* Cast each byte to u32 before shifting: a u8 promotes to signed
+        * int, and left-shifting a byte >= 0x80 by 24 would shift into
+        * the sign bit (undefined behavior).
+        */
+       high = p_filter[1] | ((u32)p_filter[0] << 8);
+       low = p_filter[5] | ((u32)p_filter[4] << 8) |
+           ((u32)p_filter[3] << 16) | ((u32)p_filter[2] << 24);
+
+       /* Find a free entry and utilize it */
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               en = ecore_rd(p_hwfn, p_ptt,
+                             NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+               if (en)
+                       continue;
+               /* Program value/mode/protocol first; enable the entry last */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        2 * i * sizeof(u32), low);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        (2 * i + 1) * sizeof(u32), high);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+                        i * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+               break;
+       }
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+               DP_NOTICE(p_hwfn, false,
+                         "Failed to find an empty LLH filter to utilize\n");
+               return ECORE_INVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "MAC: %x:%x:%x:%x:%x:%x is added at %d\n",
+                  p_filter[0], p_filter[1], p_filter[2],
+                  p_filter[3], p_filter[4], p_filter[5], i);
+
+       return ECORE_SUCCESS;
+}
+
+/* Locate the LLH filter entry holding the given MAC and clear it. */
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt, u8 *p_filter)
+{
+       u32 high, low;
+       int i;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return;
+
+       /* Cast each byte to u32 before shifting: a u8 promotes to signed
+        * int, and left-shifting a byte >= 0x80 by 24 would shift into
+        * the sign bit (undefined behavior).  Must match the encoding
+        * used when the filter was added.
+        */
+       high = p_filter[1] | ((u32)p_filter[0] << 8);
+       low = p_filter[5] | ((u32)p_filter[4] << 8) |
+           ((u32)p_filter[3] << 16) | ((u32)p_filter[2] << 24);
+
+       /* Find the entry and clean it */
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               if (ecore_rd(p_hwfn, p_ptt,
+                            NIG_REG_LLH_FUNC_FILTER_VALUE +
+                            2 * i * sizeof(u32)) != low)
+                       continue;
+               if (ecore_rd(p_hwfn, p_ptt,
+                            NIG_REG_LLH_FUNC_FILTER_VALUE +
+                            (2 * i + 1) * sizeof(u32)) != high)
+                       continue;
+
+               /* Disable the entry first, then scrub both value words */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        2 * i * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        (2 * i + 1) * sizeof(u32), 0);
+               break;
+       }
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+               DP_NOTICE(p_hwfn, false,
+                         "Tried to remove a non-configured filter\n");
+}
+
+/* Install an ethertype-based LLH function filter in a free entry so
+ * traffic of that protocol type is steered to this function.
+ * Returns ECORE_INVAL when all filter entries are in use.
+ */
+enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt,
+                                                   u16 filter)
+{
+       u32 filter_high = filter, filter_low = 0;
+       int entry;
+
+       /* LLH filtering is only relevant in these multi-function modes */
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return ECORE_SUCCESS;
+
+       /* Locate the first disabled filter entry */
+       for (entry = 0; entry < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; entry++)
+               if (!ecore_rd(p_hwfn, p_ptt,
+                             NIG_REG_LLH_FUNC_FILTER_EN +
+                             entry * sizeof(u32)))
+                       break;
+
+       if (entry >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+               DP_NOTICE(p_hwfn, false,
+                         "Failed to find an empty LLH filter to utilize\n");
+               return ECORE_INVAL;
+       }
+
+       /* Program value/mode/protocol first; enable the entry last */
+       ecore_wr(p_hwfn, p_ptt,
+                NIG_REG_LLH_FUNC_FILTER_VALUE +
+                2 * entry * sizeof(u32), filter_low);
+       ecore_wr(p_hwfn, p_ptt,
+                NIG_REG_LLH_FUNC_FILTER_VALUE +
+                (2 * entry + 1) * sizeof(u32), filter_high);
+       ecore_wr(p_hwfn, p_ptt,
+                NIG_REG_LLH_FUNC_FILTER_MODE + entry * sizeof(u32), 1);
+       ecore_wr(p_hwfn, p_ptt,
+                NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+                entry * sizeof(u32), 1);
+       ecore_wr(p_hwfn, p_ptt,
+                NIG_REG_LLH_FUNC_FILTER_EN + entry * sizeof(u32), 1);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "ETH type: %x is added at %d\n", filter, entry);
+
+       return ECORE_SUCCESS;
+}
+
+/* Locate the LLH filter entry holding the given ethertype and clear it. */
+void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt, u16 filter)
+{
+       u32 entry_low, entry_high;
+       int entry;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return;
+
+       /* Ethertype filters keep the type in the odd (high) value word
+        * and zero in the even (low) one - match on both.
+        */
+       for (entry = 0; entry < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; entry++) {
+               entry_low = ecore_rd(p_hwfn, p_ptt,
+                                    NIG_REG_LLH_FUNC_FILTER_VALUE +
+                                    2 * entry * sizeof(u32));
+               if (entry_low != 0)
+                       continue;
+               entry_high = ecore_rd(p_hwfn, p_ptt,
+                                     NIG_REG_LLH_FUNC_FILTER_VALUE +
+                                     (2 * entry + 1) * sizeof(u32));
+               if (entry_high != (u32)filter)
+                       continue;
+
+               /* Disable the entry first, then scrub both value words */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_EN +
+                        entry * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        2 * entry * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        (2 * entry + 1) * sizeof(u32), 0);
+               break;
+       }
+
+       if (entry >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+               DP_NOTICE(p_hwfn, false,
+                         "Tried to remove a non-configured filter\n");
+}
+
+/* Disable and scrub every LLH function filter entry. */
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt)
+{
+       int entry;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return;
+
+       for (entry = 0; entry < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; entry++) {
+               /* Disable the entry, then zero its two value words */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_EN +
+                        entry * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        2 * entry * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        (2 * entry + 1) * sizeof(u32), 0);
+       }
+}
+
+/* Basic GRC access sanity test: for every register in reg_tbl, write the
+ * test patterns 0 and 1, read each back, and restore the register's
+ * original value.  Returns ECORE_AGAIN on the first read-back mismatch
+ * (register access is broken / chip not responding), ECORE_SUCCESS
+ * otherwise.
+ */
+enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt)
+{
+       /* One register per HW block; presumably chosen as harmless to
+        * overwrite briefly - confirm against the register spec before
+        * extending this table.
+        */
+       u32 reg_tbl[] = {
+               BRB_REG_HEADER_SIZE,
+               BTB_REG_HEADER_SIZE,
+               CAU_REG_LONG_TIMEOUT_THRESHOLD,
+               CCFC_REG_ACTIVITY_COUNTER,
+               CDU_REG_CID_ADDR_PARAMS,
+               DBG_REG_CLIENT_ENABLE,
+               DMAE_REG_INIT,
+               DORQ_REG_IFEN,
+               GRC_REG_TIMEOUT_EN,
+               IGU_REG_BLOCK_CONFIGURATION,
+               MCM_REG_INIT,
+               MCP2_REG_DBG_DWORD_ENABLE,
+               MISC_REG_PORT_MODE,
+               MISCS_REG_CLK_100G_MODE,
+               MSDM_REG_ENABLE_IN1,
+               MSEM_REG_ENABLE_IN,
+               NIG_REG_CM_HDR,
+               NCSI_REG_CONFIG,
+               PBF_REG_INIT,
+               PTU_REG_ATC_INIT_ARRAY,
+               PCM_REG_INIT,
+               PGLUE_B_REG_ADMIN_PER_PF_REGION,
+               PRM_REG_DISABLE_PRM,
+               PRS_REG_SOFT_RST,
+               PSDM_REG_ENABLE_IN1,
+               PSEM_REG_ENABLE_IN,
+               PSWRQ_REG_DBG_SELECT,
+               PSWRQ2_REG_CDUT_P_SIZE,
+               PSWHST_REG_DISCARD_INTERNAL_WRITES,
+               PSWHST2_REG_DBGSYN_ALMOST_FULL_THR,
+               PSWRD_REG_DBG_SELECT,
+               PSWRD2_REG_CONF11,
+               PSWWR_REG_USDM_FULL_TH,
+               PSWWR2_REG_CDU_FULL_TH2,
+               QM_REG_MAXPQSIZE_0,
+               RSS_REG_RSS_INIT_EN,
+               RDIF_REG_STOP_ON_ERROR,
+               SRC_REG_SOFT_RST,
+               TCFC_REG_ACTIVITY_COUNTER,
+               TCM_REG_INIT,
+               TM_REG_PXP_READ_DATA_FIFO_INIT,
+               TSDM_REG_ENABLE_IN1,
+               TSEM_REG_ENABLE_IN,
+               TDIF_REG_STOP_ON_ERROR,
+               UCM_REG_INIT,
+               UMAC_REG_IPG_HD_BKP_CNTL_BB_B0,
+               USDM_REG_ENABLE_IN1,
+               USEM_REG_ENABLE_IN,
+               XCM_REG_INIT,
+               XSDM_REG_ENABLE_IN1,
+               XSEM_REG_ENABLE_IN,
+               YCM_REG_INIT,
+               YSDM_REG_ENABLE_IN1,
+               YSEM_REG_ENABLE_IN,
+               XYLD_REG_SCBD_STRICT_PRIO,
+               TMLD_REG_SCBD_STRICT_PRIO,
+               MULD_REG_SCBD_STRICT_PRIO,
+               YULD_REG_SCBD_STRICT_PRIO,
+       };
+       u32 test_val[] = { 0x0, 0x1 };
+       u32 val, save_val, i, j;
+
+       /* Outer loop: test pattern; inner loop: register under test */
+       for (i = 0; i < OSAL_ARRAY_SIZE(test_val); i++) {
+               for (j = 0; j < OSAL_ARRAY_SIZE(reg_tbl); j++) {
+                       save_val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
+                       ecore_wr(p_hwfn, p_ptt, reg_tbl[j], test_val[i]);
+                       val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
+                       /* Restore the original register's value */
+                       ecore_wr(p_hwfn, p_ptt, reg_tbl[j], save_val);
+                       if (val != test_val[i]) {
+                               DP_INFO(p_hwfn->p_dev,
+                                       "offset 0x%x: val 0x%x != 0x%x\n",
+                                       reg_tbl[j], val, test_val[i]);
+                               return ECORE_AGAIN;
+                       }
+               }
+       }
+       return ECORE_SUCCESS;
+}
+
+/* Zero the caller-supplied queue-zone buffer, stamp it with the given
+ * timeset and mark it valid, then copy it into storm RAM at hw_addr.
+ * Fails with ECORE_INVAL if coalescing was never enabled on the device.
+ */
+static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
+                                              struct ecore_ptt *p_ptt,
+                                              u32 hw_addr, void *p_qzone,
+                                              osal_size_t qzone_size,
+                                              u8 timeset)
+{
+       struct coalescing_timeset *p_timeset;
+
+       /* Coalescing must have been globally enabled on the device */
+       if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
+               DP_NOTICE(p_hwfn, true,
+                         "Coalescing configuration not enabled\n");
+               return ECORE_INVAL;
+       }
+
+       OSAL_MEMSET(p_qzone, 0, qzone_size);
+       p_timeset = p_qzone;
+       p_timeset->timeset = timeset;
+       p_timeset->valid = 1;
+       ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_qzone, qzone_size);
+
+       return ECORE_SUCCESS;
+}
+
+/* Program the Rx interrupt-coalescing timeset for queue qid and, on
+ * success, remember the requested value in rx_coalesce_usecs.
+ */
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u8 coalesce, u8 qid)
+{
+       struct ustorm_eth_queue_zone qzone;
+       enum _ecore_status_t rc;
+       u16 fw_qid = 0;
+       u32 address;
+       u8 timeset;
+
+       rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       address = BAR0_MAP_REG_USDM_RAM +
+           USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+       /* Translate the coalescing time into a timeset, according to:
+        * Timeout[Rx] = TimeSet[Rx] << (TimerRes[Rx] + 1)
+        */
+       timeset = coalesce >> (ECORE_CAU_DEF_RX_TIMER_RES + 1);
+
+       rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
+                               sizeof(struct ustorm_eth_queue_zone), timeset);
+       if (rc == ECORE_SUCCESS)
+               p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
+
+       return rc;
+}
+
+/* Program the Tx interrupt-coalescing timeset for queue qid and, on
+ * success, remember the requested value in tx_coalesce_usecs.
+ */
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u8 coalesce, u8 qid)
+{
+       struct ystorm_eth_queue_zone qzone;
+       enum _ecore_status_t rc;
+       u16 fw_qid = 0;
+       u32 address;
+       u8 timeset;
+
+       rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       address = BAR0_MAP_REG_YSDM_RAM +
+           YSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+       /* Translate the coalescing time into a timeset, according to:
+        * Timeout[Tx] = TimeSet[Tx] << (TimerRes[Tx] + 1)
+        */
+       timeset = coalesce >> (ECORE_CAU_DEF_TX_TIMER_RES + 1);
+
+       rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
+                               sizeof(struct ystorm_eth_queue_zone), timeset);
+       if (rc == ECORE_SUCCESS)
+               p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
+
+       return rc;
+}
+
+/* Calculate final WFQ values for all vports and configure it.
+ * After this configuration each vport must have
+ * approx min rate =  vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
+ */
+static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+                                              struct ecore_ptt *p_ptt,
+                                              u32 min_pf_rate)
+{
+       struct init_qm_vport_params *p_vport;
+       int vp_idx, num_vports;
+
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       for (vp_idx = 0; vp_idx < num_vports; vp_idx++) {
+               u32 wfq_speed = p_hwfn->qm_info.wfq_data[vp_idx].min_speed;
+
+               /* Scale the vport's min speed into a WFQ weight */
+               p_vport = &p_hwfn->qm_info.qm_vport_params[vp_idx];
+               p_vport->vport_wfq =
+                   (wfq_speed * ECORE_WFQ_UNIT) / min_pf_rate;
+               ecore_init_vport_wfq(p_hwfn, p_ptt,
+                                    p_vport->first_tx_pq_id,
+                                    p_vport->vport_wfq);
+       }
+}
+
+/* Give every vport a unit WFQ weight and an equal share of min_pf_rate
+ * as its default minimum speed.
+ */
+static void
+ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
+{
+       int i, num_vports;
+       u32 min_speed;
+
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* Guard the division below - a PF with no vports has nothing to
+        * configure and would otherwise divide by zero.
+        */
+       if (!num_vports)
+               return;
+
+       min_speed = min_pf_rate / num_vports;
+
+       for (i = 0; i < num_vports; i++) {
+               p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+               p_hwfn->qm_info.wfq_data[i].default_min_speed = min_speed;
+       }
+}
+
+/* Fall back from explicit WFQ configuration: restore the default
+ * equal-share parameters and reprogram every vport's WFQ weight.
+ */
+static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 min_pf_rate)
+{
+       struct init_qm_vport_params *vport_params;
+       int i, num_vports;
+
+       vport_params = p_hwfn->qm_info.qm_vport_params;
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* ecore_init_wfq_default_param() already covers every vport in a
+        * single pass, so invoke it once up front instead of once per
+        * loop iteration as before (redundant O(n^2) work).  The guard
+        * keeps the num_vports == 0 behavior identical (no call at all).
+        */
+       if (num_vports)
+               ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
+
+       for (i = 0; i < num_vports; i++)
+               ecore_init_vport_wfq(p_hwfn, p_ptt,
+                                    vport_params[i].first_tx_pq_id,
+                                    vport_params[i].vport_wfq);
+}
+
+/* Validate a WFQ min-rate request for a single vport against the PF min
+ * rate and the rates already requested by other vports; on success,
+ * record the request and spread the leftover bandwidth evenly over the
+ * vports that have no explicit request.
+ */
+static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
+                                                u16 vport_id, u32 req_rate,
+                                                u32 min_pf_rate)
+{
+       u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+       int non_requested_count = 0, req_count = 0, i, num_vports;
+
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* Accumulate min rates already requested by the other vports */
+       for (i = 0; i < num_vports; i++) {
+               u32 tmp_speed;
+
+               if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
+                       req_count++;
+                       tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+                       total_req_min_rate += tmp_speed;
+               }
+       }
+
+       /* Include current vport data as well */
+       req_count++;
+       total_req_min_rate += req_rate;
+       non_requested_count = num_vports - req_count;
+
+       /* validate possible error cases */
+       if (req_rate > min_pf_rate) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Vport [%d] - Requested rate[%d Mbps] is greater"
+                          " than configured PF min rate[%d Mbps]\n",
+                          vport_id, req_rate, min_pf_rate);
+               return ECORE_INVAL;
+       }
+
+       /* Rates below 1% of the PF min rate cannot be represented as a
+        * WFQ weight (the integer scaling below would round to zero).
+        */
+       if (req_rate * ECORE_WFQ_UNIT / min_pf_rate < 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Vport [%d] - Requested rate[%d Mbps] is less than"
+                          " one percent of configured PF min rate[%d Mbps]\n",
+                          vport_id, req_rate, min_pf_rate);
+               return ECORE_INVAL;
+       }
+
+       /* TBD - for number of vports greater than 100 */
+       if (ECORE_WFQ_UNIT / num_vports < 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Number of vports are greater than 100\n");
+               return ECORE_INVAL;
+       }
+
+       if (total_req_min_rate > min_pf_rate) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Total requested min rate for all vports[%d Mbps]"
+                          " is greater than configured PF min rate[%d Mbps]\n",
+                          total_req_min_rate, min_pf_rate);
+               return ECORE_INVAL;
+       }
+
+       /* Data left for non requested vports */
+       total_left_rate = min_pf_rate - total_req_min_rate;
+
+       /* Guard the division - when every vport already has a configured
+        * rate there are no non-requested vports, and the old code would
+        * divide by zero here.
+        */
+       if (non_requested_count) {
+               left_rate_per_vp = total_left_rate / non_requested_count;
+
+               /* validate if non requested get < 1% of min bw */
+               if (left_rate_per_vp * ECORE_WFQ_UNIT / min_pf_rate < 1)
+                       return ECORE_INVAL;
+       }
+
+       /* now req_rate for given vport passes all scenarios.
+        * assign final wfq rates to all vports.
+        */
+       p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+       p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+       for (i = 0; i < num_vports; i++) {
+               if (p_hwfn->qm_info.wfq_data[i].configured)
+                       continue;
+
+               p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Validate and apply a min-rate request for one vport.  While the PF
+ * min rate is still unknown (link down), only record the request so a
+ * later link change can apply it.
+ */
+static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      u16 vp_id, u32 rate)
+{
+       struct ecore_mcp_link_state *p_link;
+       int rc;
+
+       /* Link state is tracked on the first hw-function */
+       p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
+
+       if (!p_link->min_pf_rate) {
+               p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+               p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+               return ECORE_SUCCESS;
+       }
+
+       rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, false,
+                         "Validation failed while configuring min rate\n");
+               return rc;
+       }
+
+       ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+                                          p_link->min_pf_rate);
+
+       return ECORE_SUCCESS;
+}
+
+/* Revalidate every pre-configured vport min rate against a new PF min
+ * rate (e.g. after an MCP link change) and reapply or disable WFQ.
+ */
+static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
+                                                  struct ecore_ptt *p_ptt,
+                                                  u32 min_pf_rate)
+{
+       int rc = ECORE_SUCCESS;
+       bool use_wfq = false;
+       u16 i, num_vports;
+
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* Validate all pre configured vports for wfq */
+       for (i = 0; i < num_vports; i++) {
+               if (p_hwfn->qm_info.wfq_data[i].configured) {
+                       u32 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+                       use_wfq = true;
+                       rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+                       if (rc == ECORE_INVAL) {
+                               DP_NOTICE(p_hwfn, false,
+                                         "Validation failed while"
+                                         " configuring min rate\n");
+                               break;
+                       }
+               }
+       }
+
+       /* Apply WFQ only when at least one vport is configured and all of
+        * them revalidated; otherwise fall back to the default equal-share
+        * parameters for every vport.
+        */
+       if (rc == ECORE_SUCCESS && use_wfq)
+               ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+       else
+               ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+       return rc;
+}
+
+/* Main API for ecore clients to configure vport min rate.
+ * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
+ * rate - Speed in Mbps needs to be assigned to a given vport.
+ */
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
+{
+       int i, rc = ECORE_INVAL;
+
+       /* TBD - for multiple hardware functions - that is 100 gig */
+       if (p_dev->num_hwfns > 1) {
+               DP_NOTICE(p_dev, false,
+                         "WFQ configuration is not supported for this dev\n");
+               return rc;
+       }
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+               struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+
+               if (!p_ptt)
+                       return ECORE_TIMEOUT;
+
+               /* Release the PTT on every path out of the iteration */
+               rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+               ecore_ptt_release(p_hwfn, p_ptt);
+
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+       }
+
+       return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+                                          u32 min_pf_rate)
+{
+       int hwfn_idx;
+
+       /* TBD - for multiple hardware functions - that is 100 gig */
+       if (p_dev->num_hwfns > 1) {
+               DP_VERBOSE(p_dev, ECORE_MSG_LINK,
+                          "WFQ configuration is not supported for this dev\n");
+               return;
+       }
+
+       for_each_hwfn(p_dev, hwfn_idx) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[hwfn_idx];
+
+               /* Reuse the hw-function's pre-acquired DPC PTT rather
+                * than acquiring a fresh one in this notification path.
+                */
+               __ecore_configure_vp_wfq_on_link_change(p_hwfn,
+                                                       p_hwfn->p_dpc_ptt,
+                                                       min_pf_rate);
+       }
+}
+
+/* Record the PF maximum-bandwidth percentage and, when the link is up,
+ * derive the new speed and program the PF rate limiter with it.
+ */
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_mcp_link_state *p_link,
+                                      u8 max_bw)
+{
+       int rc;
+
+       p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+       /* Nothing to program while the link is down */
+       if (!p_link->line_speed)
+               return ECORE_SUCCESS;
+
+       p_link->speed = (p_link->line_speed * max_bw) / 100;
+
+       rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_link->speed);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                  "Configured MAX bandwidth to be %08x Mb/sec\n",
+                  p_link->speed);
+
+       return rc;
+}
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
+{
+       int i, rc = ECORE_INVAL;
+
+       if (max_bw < 1 || max_bw > 100) {
+               DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+               struct ecore_mcp_link_state *p_link =
+                   &ECORE_LEADING_HWFN(p_dev)->mcp_info->link_output;
+               struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+
+               if (!p_ptt)
+                       return ECORE_TIMEOUT;
+
+               /* Release the PTT on every path out of the iteration */
+               rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+                                                       p_link, max_bw);
+               ecore_ptt_release(p_hwfn, p_ptt);
+
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+       }
+
+       return rc;
+}
+
+/* Record the PF minimum-bandwidth percentage and, when the link is up,
+ * derive the new PF min rate and program the PF WFQ weight with it.
+ */
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_mcp_link_state *p_link,
+                                      u8 min_bw)
+{
+       int rc;
+
+       p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+
+       /* Nothing to program while the link is down */
+       if (!p_link->line_speed)
+               return ECORE_SUCCESS;
+
+       p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+       rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                  "Configured MIN bandwidth to be %d Mb/sec\n",
+                  p_link->min_pf_rate);
+
+       return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
+{
+       int i, rc = ECORE_INVAL;
+
+       if (min_bw < 1 || min_bw > 100) {
+               DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+               struct ecore_mcp_link_state *p_link =
+                   &ECORE_LEADING_HWFN(p_dev)->mcp_info->link_output;
+               struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+
+               if (!p_ptt)
+                       return ECORE_TIMEOUT;
+
+               rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+                                                       p_link, min_bw);
+               if (rc != ECORE_SUCCESS) {
+                       ecore_ptt_release(p_hwfn, p_ptt);
+                       return rc;
+               }
+
+               /* A changed PF min rate also affects the per-vport WFQ
+                * configuration, so revalidate and reapply it.
+                */
+               if (p_link->min_pf_rate) {
+                       u32 min_rate = p_link->min_pf_rate;
+
+                       rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
+                                                                    p_ptt,
+                                                                    min_rate);
+               }
+
+               ecore_ptt_release(p_hwfn, p_ptt);
+       }
+
+       return rc;
+}
+
+/* Drop all WFQ state: disable per-vport WFQ if a PF min rate was set,
+ * then zero the entire wfq_data array.
+ */
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_link_state *p_link = &p_hwfn->mcp_info->link_output;
+
+       if (p_link->min_pf_rate)
+               ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+                                                p_link->min_pf_rate);
+
+       OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
+                   sizeof(*p_hwfn->qm_info.wfq_data) *
+                   p_hwfn->qm_info.num_vports);
+}
+
+/* BB-family devices have two engines; otherwise there is one. */
+int ecore_device_num_engines(struct ecore_dev *p_dev)
+{
+       if (ECORE_IS_BB(p_dev))
+               return 2;
+
+       return 1;
+}
+
+/* Number of ports exposed by the device. */
+int ecore_device_num_ports(struct ecore_dev *p_dev)
+{
+       /* in CMT always only one port */
+       return p_dev->num_hwfns > 1 ? 1 :
+           p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
+}
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
new file mode 100644 (file)
index 0000000..535b82b
--- /dev/null
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DEV_API_H__
+#define __ECORE_DEV_API_H__
+
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_int_api.h"
+
+struct ecore_tunn_start_params;
+
+/**
+ * @brief ecore_init_dp - initialize the debug level
+ *
+ * @param p_dev
+ * @param dp_module
+ * @param dp_level
+ * @param dp_ctx
+ */
+void ecore_init_dp(struct ecore_dev *p_dev,
+                  u32 dp_module, u8 dp_level, void *dp_ctx);
+
+/**
+ * @brief ecore_init_struct - initialize the device structure to
+ *        its defaults
+ *
+ * @param p_dev
+ */
+void ecore_init_struct(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_free -
+ *
+ * @param p_dev
+ */
+void ecore_resc_free(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_alloc -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_setup -
+ *
+ * @param p_dev
+ */
+void ecore_resc_setup(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_init -
+ *
+ * @param p_dev
+ * @param p_tunn - tunneling parameters
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ *       for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ *                     Pass NULL if not using binary fw file.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+                                  struct ecore_tunn_start_params *p_tunn,
+                                  bool b_hw_start,
+                                  enum ecore_int_mode int_mode,
+                                  bool allow_npar_tx_switch,
+                                  const u8 *bin_fw_data);
+
+/**
+ * @brief ecore_hw_timers_stop_all -
+ *
+ * @param p_dev
+ *
+ * @return void
+ */
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop_fastpath - should be called in case
+ *        slowpath is still required for the device, but
+ *        fastpath is not.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_prepare_hibernate -should be called when
+ *        the system is going into the hibernate state
+ *
+ * @param p_dev
+ *
+ */
+void ecore_prepare_hibernate(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_start_fastpath -restart fastpath traffic,
+ *        only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_hw_reset -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_prepare -
+ *
+ * @param p_dev
+ * @param personality - personality to initialize
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality);
+
+/**
+ * @brief ecore_hw_remove -
+ *
+ * @param p_dev
+ */
+void ecore_hw_remove(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct ecore_ptt
+ */
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+#ifndef __EXTRACT__LINUX__
+struct ecore_eth_stats {
+       u64 no_buff_discards;
+       u64 packet_too_big_discard;
+       u64 ttl0_discard;
+       u64 rx_ucast_bytes;
+       u64 rx_mcast_bytes;
+       u64 rx_bcast_bytes;
+       u64 rx_ucast_pkts;
+       u64 rx_mcast_pkts;
+       u64 rx_bcast_pkts;
+       u64 mftag_filter_discards;
+       u64 mac_filter_discards;
+       u64 tx_ucast_bytes;
+       u64 tx_mcast_bytes;
+       u64 tx_bcast_bytes;
+       u64 tx_ucast_pkts;
+       u64 tx_mcast_pkts;
+       u64 tx_bcast_pkts;
+       u64 tx_err_drop_pkts;
+       u64 tpa_coalesced_pkts;
+       u64 tpa_coalesced_events;
+       u64 tpa_aborts_num;
+       u64 tpa_not_coalesced_pkts;
+       u64 tpa_coalesced_bytes;
+
+       /* port */
+       u64 rx_64_byte_packets;
+       u64 rx_65_to_127_byte_packets;
+       u64 rx_128_to_255_byte_packets;
+       u64 rx_256_to_511_byte_packets;
+       u64 rx_512_to_1023_byte_packets;
+       u64 rx_1024_to_1518_byte_packets;
+       u64 rx_1519_to_1522_byte_packets;
+       u64 rx_1519_to_2047_byte_packets;
+       u64 rx_2048_to_4095_byte_packets;
+       u64 rx_4096_to_9216_byte_packets;
+       u64 rx_9217_to_16383_byte_packets;
+       u64 rx_crc_errors;
+       u64 rx_mac_crtl_frames;
+       u64 rx_pause_frames;
+       u64 rx_pfc_frames;
+       u64 rx_align_errors;
+       u64 rx_carrier_errors;
+       u64 rx_oversize_packets;
+       u64 rx_jabbers;
+       u64 rx_undersize_packets;
+       u64 rx_fragments;
+       u64 tx_64_byte_packets;
+       u64 tx_65_to_127_byte_packets;
+       u64 tx_128_to_255_byte_packets;
+       u64 tx_256_to_511_byte_packets;
+       u64 tx_512_to_1023_byte_packets;
+       u64 tx_1024_to_1518_byte_packets;
+       u64 tx_1519_to_2047_byte_packets;
+       u64 tx_2048_to_4095_byte_packets;
+       u64 tx_4096_to_9216_byte_packets;
+       u64 tx_9217_to_16383_byte_packets;
+       u64 tx_pause_frames;
+       u64 tx_pfc_frames;
+       u64 tx_lpi_entry_count;
+       u64 tx_total_collisions;
+       u64 brb_truncates;
+       u64 brb_discards;
+       u64 rx_mac_bytes;
+       u64 rx_mac_uc_packets;
+       u64 rx_mac_mc_packets;
+       u64 rx_mac_bc_packets;
+       u64 rx_mac_frames_ok;
+       u64 tx_mac_bytes;
+       u64 tx_mac_uc_packets;
+       u64 tx_mac_mc_packets;
+       u64 tx_mac_bc_packets;
+       u64 tx_mac_ctrl_frames;
+};
+#endif
+
+enum ecore_dmae_address_type_t {
+       ECORE_DMAE_ADDRESS_HOST_VIRT,
+       ECORE_DMAE_ADDRESS_HOST_PHYS,
+       ECORE_DMAE_ADDRESS_GRC
+};
+
+/* value of flags If ECORE_DMAE_FLAG_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to destination address
+ * using DMA
+ */
+#define ECORE_DMAE_FLAG_RW_REPL_SRC    0x00000001
+#define ECORE_DMAE_FLAG_VF_SRC         0x00000002
+#define ECORE_DMAE_FLAG_VF_DST         0x00000004
+#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+struct ecore_dmae_params {
+       u32 flags;              /* consists of ECORE_DMAE_FLAG_* values */
+       u8 src_vfid;
+       u8 dst_vfid;
+};
+
+/**
+* @brief ecore_dmae_host2grc - copy data from source addr to
+* dmae registers using the given ptt
+*
+* @param p_hwfn
+* @param p_ptt
+* @param source_addr
+* @param grc_addr (dmae_data_offset)
+* @param size_in_dwords
+* @param flags (one of the flags defined above)
+*/
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+                   struct ecore_ptt *p_ptt,
+                   u64 source_addr,
+                   u32 grc_addr, u32 size_in_dwords, u32 flags);
+
+/**
+* @brief ecore_dmae_grc2host - Read data from dmae data offset
+* to destination address using the given ptt
+*
+* @param p_ptt
+* @param grc_addr (dmae_data_offset)
+* @param dest_addr
+* @param size_in_dwords
+* @param flags - one of the flags defined above
+*/
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+                   struct ecore_ptt *p_ptt,
+                   u32 grc_addr,
+                   dma_addr_t dest_addr, u32 size_in_dwords, u32 flags);
+
+/**
+* @brief ecore_dmae_host2host - copy data from a source address
+* to a destination address (for SRIOV) using the given ptt
+*
+* @param p_hwfn
+* @param p_ptt
+* @param source_addr
+* @param dest_addr
+* @param size_in_dwords
+* @param params
+*/
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt,
+                    dma_addr_t source_addr,
+                    dma_addr_t dest_addr,
+                    u32 size_in_dwords, struct ecore_dmae_params *p_params);
+
+/**
+ * @brief ecore_chain_alloc - Allocate and initialize a chain
+ *
+ * @param p_hwfn
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_chain_alloc(struct ecore_dev *p_dev,
+                 enum ecore_chain_use_mode intended_use,
+                 enum ecore_chain_mode mode,
+                 enum ecore_chain_cnt_type cnt_type,
+                 u32 num_elems,
+                 osal_size_t elem_size, struct ecore_chain *p_chain);
+
+/**
+ * @brief ecore_chain_free - Free chain DMA memory
+ *
+ * @param p_hwfn
+ * @param p_chain
+ */
+void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain);
+
+/**
+ * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+                                      u16 src_id, u16 *dst_id);
+
+/**
+ * @brief ecore_fw_vport - Get absolute vport ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+                                   u8 src_id, u8 *dst_id);
+
+/**
+ * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+                                     u8 src_id, u8 *dst_id);
+
+/**
+ * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u8 *p_filter);
+
+/**
+ * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt, u8 *p_filter);
+
+/**
+ * @brief ecore_llh_add_ethertype_filter - configures an ethertype filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param filter - ethertype to add
+ */
+enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt,
+                                                   u16 filter);
+
+/**
+ * @brief ecore_llh_remove_ethertype_filter - removes an ethertype llh filter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param filter - ethertype to remove
+ */
+void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt, u16 filter);
+
+/**
+ * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt);
+
+ /**
+*@brief Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        u16 id, bool is_vf);
+
+/**
+ * @brief ecore_test_registers - Perform register tests
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ *  @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u8 coalesce, u8 qid);
+
+/**
+ * @brief ecore_set_txq_coalesce - Configure coalesce parameters for a Tx queue
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u8 coalesce, u8 qid);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
new file mode 100644 (file)
index 0000000..cc49fc7
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef GTT_REG_ADDR_H
+#define GTT_REG_ADDR_H
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
+
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
+
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
+
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
+
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
+
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
+
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
+
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM  0x016000UL
+
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
+
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
+
+#endif
diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h
new file mode 100644 (file)
index 0000000..f2efe24
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __PREVENT_PXP_GLOBAL_WIN__
+
+static u32 pxp_global_win[] = {
+       0,
+       0,
+       0x1c02,                 /* win 2: addr=0x1c02000, size=4096 bytes */
+       0x1c80,                 /* win 3: addr=0x1c80000, size=4096 bytes */
+       0x1d00,                 /* win 4: addr=0x1d00000, size=4096 bytes */
+       0x1d01,                 /* win 5: addr=0x1d01000, size=4096 bytes */
+       0x1d80,                 /* win 6: addr=0x1d80000, size=4096 bytes */
+       0x1d81,                 /* win 7: addr=0x1d81000, size=4096 bytes */
+       0x1d82,                 /* win 8: addr=0x1d82000, size=4096 bytes */
+       0x1e00,                 /* win 9: addr=0x1e00000, size=4096 bytes */
+       0x1e80,                 /* win 10: addr=0x1e80000, size=4096 bytes */
+       0x1f00,                 /* win 11: addr=0x1f00000, size=4096 bytes */
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+};
+
+#endif /* __PREVENT_PXP_GLOBAL_WIN__ */
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
new file mode 100644 (file)
index 0000000..e341b95
--- /dev/null
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_COMMON__
+#define __ECORE_HSI_COMMON__
+/********************************/
+/* Add include to common target */
+/********************************/
+#include "common_hsi.h"
+
+/*
+ * opcodes for the event ring
+ */
+enum common_event_opcode {
+       COMMON_EVENT_PF_START,
+       COMMON_EVENT_PF_STOP,
+       COMMON_EVENT_VF_START,
+       COMMON_EVENT_VF_STOP,
+       COMMON_EVENT_VF_PF_CHANNEL,
+       COMMON_EVENT_VF_FLR,
+       COMMON_EVENT_PF_UPDATE,
+       COMMON_EVENT_MALICIOUS_VF,
+       COMMON_EVENT_EMPTY,
+       MAX_COMMON_EVENT_OPCODE
+};
+
+/*
+ * Common Ramrod Command IDs
+ */
+enum common_ramrod_cmd_id {
+       COMMON_RAMROD_UNUSED,
+       COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+       COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+       COMMON_RAMROD_VF_START /* VF Function Start */,
+       COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
+       COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
+       COMMON_RAMROD_EMPTY /* Empty Ramrod */,
+       MAX_COMMON_RAMROD_CMD_ID
+};
+
+/*
+ * The core storm context for the Ystorm
+ */
+struct ystorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/*
+ * The core storm context for the Pstorm
+ */
+struct pstorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/*
+ * Core Slowpath Connection storm context of Xstorm
+ */
+struct xstorm_core_conn_st_ctx {
+       __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
+       __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
+       struct regpair consolid_base_addr /* Consolidation Ring Base Address */
+         ;
+       __le16 spq_cons /* SPQ Ring Consumer */;
+       __le16 consolid_cons /* Consolidation Ring Consumer */;
+       __le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct xstorm_core_conn_ag_ctx {
+       u8 reserved0 /* cdu_validation */;
+       u8 core_state /* state */;
+       u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
+       u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
+       u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
+       u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
+       u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
+       u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
+       u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
+       u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
+       u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
+       u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
+       u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
+       u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
+       u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
+       u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
+       u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
+       u8 byte2 /* byte2 */;
+       __le16 physical_q0 /* physical_q0 */;
+       __le16 consolid_prod /* physical_q1 */;
+       __le16 reserved16 /* physical_q2 */;
+       __le16 tx_bd_cons /* word3 */;
+       __le16 tx_bd_or_spq_prod /* word4 */;
+       __le16 word5 /* word5 */;
+       __le16 conn_dpi /* conn_dpi */;
+       u8 byte3 /* byte3 */;
+       u8 byte4 /* byte4 */;
+       u8 byte5 /* byte5 */;
+       u8 byte6 /* byte6 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le32 reg4 /* reg4 */;
+       __le32 reg5 /* cf_array0 */;
+       __le32 reg6 /* cf_array1 */;
+       __le16 word7 /* word7 */;
+       __le16 word8 /* word8 */;
+       __le16 word9 /* word9 */;
+       __le16 word10 /* word10 */;
+       __le32 reg7 /* reg7 */;
+       __le32 reg8 /* reg8 */;
+       __le32 reg9 /* reg9 */;
+       u8 byte7 /* byte7 */;
+       u8 byte8 /* byte8 */;
+       u8 byte9 /* byte9 */;
+       u8 byte10 /* byte10 */;
+       u8 byte11 /* byte11 */;
+       u8 byte12 /* byte12 */;
+       u8 byte13 /* byte13 */;
+       u8 byte14 /* byte14 */;
+       u8 byte15 /* byte15 */;
+       u8 byte16 /* byte16 */;
+       __le16 word11 /* word11 */;
+       __le32 reg10 /* reg10 */;
+       __le32 reg11 /* reg11 */;
+       __le32 reg12 /* reg12 */;
+       __le32 reg13 /* reg13 */;
+       __le32 reg14 /* reg14 */;
+       __le32 reg15 /* reg15 */;
+       __le32 reg16 /* reg16 */;
+       __le32 reg17 /* reg17 */;
+       __le32 reg18 /* reg18 */;
+       __le32 reg19 /* reg19 */;
+       __le16 word12 /* word12 */;
+       __le16 word13 /* word13 */;
+       __le16 word14 /* word14 */;
+       __le16 word15 /* word15 */;
+};
+
+/*
+ * Tstorm aggregative context for core (light L2) connections.
+ * Generated FW interface layout - member order and sizes must not change.
+ */
+struct tstorm_core_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
+	u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
+	u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
+	u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
+	u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le32 reg4 /* reg4 */;
+	__le32 reg5 /* reg5 */;
+	__le32 reg6 /* reg6 */;
+	__le32 reg7 /* reg7 */;
+	__le32 reg8 /* reg8 */;
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* word0 */;
+	u8 byte4 /* byte4 */;
+	u8 byte5 /* byte5 */;
+	__le16 word1 /* word1 */;
+	__le16 word2 /* conn_dpi */;
+	__le16 word3 /* word3 */;
+	__le32 reg9 /* reg9 */;
+	__le32 reg10 /* reg10 */;
+};
+
+/*
+ * Ustorm aggregative context for core (light L2) connections.
+ * Generated FW interface layout - member order and sizes must not change.
+ */
+struct ustorm_core_conn_ag_ctx {
+	u8 reserved /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
+	u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+	u8 byte2 /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* conn_dpi */;
+	__le16 word1 /* word1 */;
+	__le32 rx_producers /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+	__le16 word2 /* word2 */;
+	__le16 word3 /* word3 */;
+};
+
+/*
+ * The core storm context for the Mstorm
+ */
+struct mstorm_core_conn_st_ctx {
+	__le32 reserved[24];
+};
+
+/*
+ * The core storm context for the Ustorm
+ */
+struct ustorm_core_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/*
+ * core connection context - concatenation of all per-storm contexts
+ * (with padding) for a single core connection. Member order matches the
+ * FW context image and must not change.
+ */
+struct core_conn_context {
+	struct ystorm_core_conn_st_ctx ystorm_st_context
+	    /* ystorm storm context */;
+	struct regpair ystorm_st_padding[2] /* padding */;
+	struct pstorm_core_conn_st_ctx pstorm_st_context
+	    /* pstorm storm context */;
+	struct regpair pstorm_st_padding[2] /* padding */;
+	struct xstorm_core_conn_st_ctx xstorm_st_context
+	    /* xstorm storm context */;
+	struct xstorm_core_conn_ag_ctx xstorm_ag_context
+	    /* xstorm aggregative context */;
+	struct tstorm_core_conn_ag_ctx tstorm_ag_context
+	    /* tstorm aggregative context */;
+	struct ustorm_core_conn_ag_ctx ustorm_ag_context
+	    /* ustorm aggregative context */;
+	struct mstorm_core_conn_st_ctx mstorm_st_context
+	    /* mstorm storm context */;
+	struct ustorm_core_conn_st_ctx ustorm_st_context
+	    /* ustorm storm context */;
+	struct regpair ustorm_st_padding[2] /* padding */;
+};
+
+/*
+ * How ll2 should deal with packet upon errors
+ */
+enum core_error_handle {
+	LL2_DROP_PACKET /* If error occurs drop packet */,
+	LL2_DO_NOTHING /* If error occurs do nothing */,
+	LL2_ASSERT /* If error occurs assert */,
+	MAX_CORE_ERROR_HANDLE
+};
+
+/*
+ * opcodes for the event ring
+ */
+enum core_event_opcode {
+	CORE_EVENT_TX_QUEUE_START,
+	CORE_EVENT_TX_QUEUE_STOP,
+	CORE_EVENT_RX_QUEUE_START,
+	CORE_EVENT_RX_QUEUE_STOP,
+	MAX_CORE_EVENT_OPCODE
+};
+
+/*
+ * The L4 pseudo checksum mode for Core
+ */
+enum core_l4_pseudo_checksum_mode {
+	CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH
+	    /* Pseudo Checksum on packet is calculated with correct length. */
+	    ,
+	CORE_L4_PSEUDO_CSUM_ZERO_LENGTH
+	    /* Pseudo Checksum on packet is calculated with zero length. */,
+	MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+/*
+ * Light-L2 per-port GSI drop/error counters
+ * (NOTE(review): original comment said "RX Producers" - copy/paste from
+ * core_ll2_rx_prod; the fields below are clearly statistics counters.)
+ */
+struct core_ll2_port_stats {
+	struct regpair gsi_invalid_hdr;
+	struct regpair gsi_invalid_pkt_length;
+	struct regpair gsi_unsupported_pkt_typ;
+	struct regpair gsi_crcchksm_error;
+};
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct core_ll2_pstorm_per_queue_stat {
+	struct regpair sent_ucast_bytes
+	    /* number of total bytes sent without errors */;
+	struct regpair sent_mcast_bytes
+	    /* number of total bytes sent without errors */;
+	struct regpair sent_bcast_bytes
+	    /* number of total bytes sent without errors */;
+	struct regpair sent_ucast_pkts
+	    /* number of total packets sent without errors */;
+	struct regpair sent_mcast_pkts
+	    /* number of total packets sent without errors */;
+	struct regpair sent_bcast_pkts
+	    /* number of total packets sent without errors */;
+};
+
+/*
+ * Light-L2 RX Producers in Tstorm RAM
+ */
+struct core_ll2_rx_prod {
+	__le16 bd_prod /* BD Producer */;
+	__le16 cqe_prod /* CQE Producer */;
+	__le32 reserved;
+};
+
+/*
+ * Light-L2 Tstorm per-queue RX discard counters
+ */
+struct core_ll2_tstorm_per_queue_stat {
+	struct regpair packet_too_big_discard
+	    /* Number of packets discarded because they are bigger than MTU */;
+	struct regpair no_buff_discard
+	    /* Number of packets discarded due to lack of host buffers */;
+};
+
+/*
+ * Light-L2 Ustorm per-queue RX counters
+ */
+struct core_ll2_ustorm_per_queue_stat {
+	struct regpair rcv_ucast_bytes;
+	struct regpair rcv_mcast_bytes;
+	struct regpair rcv_bcast_bytes;
+	struct regpair rcv_ucast_pkts;
+	struct regpair rcv_mcast_pkts;
+	struct regpair rcv_bcast_pkts;
+};
+
+/*
+ * Core Ramrod Command IDs (light L2)
+ */
+enum core_ramrod_cmd_id {
+	CORE_RAMROD_UNUSED,
+	CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+	CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+	CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+	CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+	MAX_CORE_RAMROD_CMD_ID
+};
+
+/*
+ * Specifies how ll2 should deal with packets errors: packet_too_big and no_buff
+ * (each 2-bit field holds an enum core_error_handle value)
+ */
+struct core_rx_action_on_error {
+	u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK  0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK         0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT        2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK        0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT       4
+};
+
+/*
+ * Core RX BD for Light L2
+ */
+struct core_rx_bd {
+	struct regpair addr;
+	__le16 reserved[4];
+};
+
+/*
+ * Core RX CM offload BD for Light L2
+ */
+struct core_rx_bd_with_buff_len {
+	struct regpair addr;
+	__le16 buff_length;
+	__le16 reserved[3];
+};
+
+/*
+ * Core RX BD union for Light L2 (static or dynamic buffer length)
+ */
+union core_rx_bd_union {
+	struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
+	struct core_rx_bd_with_buff_len rx_bd_with_len
+	    /* Core Rx Bd with dynamic buffer length */;
+};
+
+/*
+ * Opaque Data for Light L2 RX CQE .
+ */
+struct core_rx_cqe_opaque_data {
+	__le32 data[2] /* Opaque CQE Data */;
+};
+
+/*
+ * Core RX CQE Type for Light L2
+ * (NOTE(review): "ILLIGAL" typo is part of the FW interface naming;
+ * renaming would break users of this header.)
+ */
+enum core_rx_cqe_type {
+	CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
+	CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
+	CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
+	CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
+	MAX_CORE_RX_CQE_TYPE
+};
+
+/*
+ * Core RX fast path CQE for Light L2 .
+ */
+struct core_rx_fast_path_cqe {
+	u8 type /* CQE type */;
+	u8 placement_offset
+	    /* Offset (in bytes) of the packet from start of the buffer */;
+	struct parsing_and_err_flags parse_flags
+	    /* Parsing and error flags from the parser */;
+	__le16 packet_length /* Total packet length (from the parser) */;
+	__le16 vlan /* 802.1q VLAN tag */;
+	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+	__le32 reserved[4];
+};
+
+/*
+ * Core Rx CM offload CQE .
+ */
+struct core_rx_gsi_offload_cqe {
+	u8 type /* CQE type */;
+	u8 data_length_error /* set if gsi data is bigger than buff */;
+	struct parsing_and_err_flags parse_flags
+	    /* Parsing and error flags from the parser */;
+	__le16 data_length /* Total packet length (from the parser) */;
+	__le16 vlan /* 802.1q VLAN tag */;
+	__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
+	__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
+	u8 reserved1[2];
+	__le32 gid_dst[4] /* Gid destination address */;
+};
+
+/*
+ * Core RX slow path CQE for Light L2 .
+ */
+struct core_rx_slow_path_cqe {
+	u8 type /* CQE type */;
+	u8 ramrod_cmd_id;
+	__le16 echo;
+	__le32 reserved1[7];
+};
+
+/*
+ * Core RX CQE union for Light L2 - discriminated by the leading type field
+ * (enum core_rx_cqe_type)
+ */
+union core_rx_cqe_union {
+	struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
+	struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
+	struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
+};
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct core_rx_start_ramrod_data {
+	struct regpair bd_base /* bd address of the first bd page */;
+	struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
+	__le16 mtu /* Maximum transmission unit */;
+	__le16 sb_id /* Status block ID */;
+	u8 sb_index /* index of the protocol index */;
+	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+	u8 complete_event_flg /* post completion to the event ring if set */;
+	u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
+	__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+	u8 inner_vlan_removal_en
+	    /* if set, 802.1q tags will be removed and copied to CQE */;
+	u8 queue_id /* Light L2 RX Queue ID */;
+	u8 main_func_queue /* Is this the main queue for the PF */;
+	u8 mf_si_bcast_accept_all;
+	u8 mf_si_mcast_accept_all;
+	struct core_rx_action_on_error action_on_error;
+	u8 gsi_offload_flag
+	    /* set when in GSI offload mode on ROCE connection */;
+	u8 reserved[7];
+};
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct core_rx_stop_ramrod_data {
+	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+	u8 complete_event_flg /* post completion to the event ring if set */;
+	u8 queue_id /* Light L2 RX Queue ID */;
+	u8 reserved1;
+	__le16 reserved2[2];
+};
+
+/*
+ * Flags for Core TX BD
+ */
+struct core_tx_bd_flags {
+	u8 as_bitfield;
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK      0x1
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT     0
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK       0x1
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT      1
+#define CORE_TX_BD_FLAGS_START_BD_MASK             0x1
+#define CORE_TX_BD_FLAGS_START_BD_SHIFT            2
+#define CORE_TX_BD_FLAGS_IP_CSUM_MASK              0x1
+#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT             3
+#define CORE_TX_BD_FLAGS_L4_CSUM_MASK              0x1
+#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT             4
+#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK             0x1
+#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT            5
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK          0x1
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK  0x1
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+};
+
+/*
+ * Core TX BD for Light L2
+ */
+struct core_tx_bd {
+	struct regpair addr /* Buffer Address */;
+	__le16 nbytes /* Number of Bytes in Buffer */;
+	__le16 vlan /* VLAN to insert to packet (if insertion flag set) */;
+	u8 nbds /* Number of BDs that make up one packet */;
+	struct core_tx_bd_flags bd_flags /* BD Flags */;
+	__le16 l4_hdr_offset_w;
+};
+
+/*
+ * Light L2 TX Destination
+ */
+enum core_tx_dest {
+	CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
+	CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
+	MAX_CORE_TX_DEST
+};
+
+/*
+ * Ramrod data for tx queue start ramrod
+ * (NOTE(review): original comment said "rx queue start" - copy/paste error;
+ * this struct is consumed by the TX queue start ramrod.)
+ */
+struct core_tx_start_ramrod_data {
+	struct regpair pbl_base_addr /* Address of the pbl page */;
+	__le16 mtu /* Maximum transmission unit */;
+	__le16 sb_id /* Status block ID */;
+	u8 sb_index /* Status block protocol index */;
+	u8 tx_dest /* TX Destination (either Network or LB) */;
+	u8 stats_en /* Statistics Enable */;
+	u8 stats_id /* Statistics Counter ID */;
+	__le16 pbl_size /* Number of BD pages pointed by PBL */;
+	__le16 qm_pq_id /* QM PQ ID */;
+	u8 conn_type /* connection type that loaded ll2 */;
+	u8 gsi_offload_flag
+	    /* set when in GSI offload mode on ROCE connection */;
+	u8 resrved[2] /* reserved; name typo kept - part of FW interface */;
+};
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct core_tx_stop_ramrod_data {
+	__le32 reserved0[2];
+};
+
+/*
+ * Ethernet Mstorm per-queue RX discard/TPA counters
+ */
+struct eth_mstorm_per_queue_stat {
+	struct regpair ttl0_discard;
+	struct regpair packet_too_big_discard;
+	struct regpair no_buff_discard;
+	struct regpair not_active_discard;
+	struct regpair tpa_coalesced_pkts;
+	struct regpair tpa_coalesced_events;
+	struct regpair tpa_aborts_num;
+	struct regpair tpa_coalesced_bytes;
+};
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct eth_pstorm_per_queue_stat {
+	struct regpair sent_ucast_bytes
+	    /* number of total bytes sent without errors */;
+	struct regpair sent_mcast_bytes
+	    /* number of total bytes sent without errors */;
+	struct regpair sent_bcast_bytes
+	    /* number of total bytes sent without errors */;
+	struct regpair sent_ucast_pkts
+	    /* number of total packets sent without errors */;
+	struct regpair sent_mcast_pkts
+	    /* number of total packets sent without errors */;
+	struct regpair sent_bcast_pkts
+	    /* number of total packets sent without errors */;
+	struct regpair error_drop_pkts
+	    /* number of total packets dropped due to errors */;
+};
+
+/*
+ * ETH RX rate-limit parameters
+ * (NOTE(review): original comment said "ETH Rx producers data" - copy/paste
+ * error; the fields below describe a linear rate-limit term.)
+ */
+struct eth_rx_rate_limit {
+	__le16 mult;
+	__le16 cnst
+	    /* Constant term to add (or subtract from number of cycles) */;
+	u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
+	u8 reserved0;
+	__le16 reserved1;
+};
+
+/*
+ * Ethernet Ustorm per-queue RX counters
+ */
+struct eth_ustorm_per_queue_stat {
+	struct regpair rcv_ucast_bytes;
+	struct regpair rcv_mcast_bytes;
+	struct regpair rcv_bcast_bytes;
+	struct regpair rcv_ucast_pkts;
+	struct regpair rcv_mcast_pkts;
+	struct regpair rcv_bcast_pkts;
+};
+
+/*
+ * Event Ring Next Page Address
+ */
+struct event_ring_next_addr {
+	struct regpair addr /* Next Page Address */;
+	__le32 reserved[2] /* Reserved */;
+};
+
+/*
+ * Event Ring Element
+ */
+union event_ring_element {
+	struct event_ring_entry entry /* Event Ring Entry */;
+	struct event_ring_next_addr next_addr /* Event Ring Next Page Address */
+	  ;
+};
+
+/*
+ * FW flow control mode
+ * (NOTE(review): original comment said "Ports mode" - copy/paste from
+ * enum ports_mode below.)
+ */
+enum fw_flow_ctrl_mode {
+	flow_ctrl_pause,
+	flow_ctrl_pfc,
+	MAX_FW_FLOW_CTRL_MODE
+};
+
+/*
+ * Integration Phase
+ */
+enum integ_phase {
+	INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
+	INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
+	INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
+	MAX_INTEG_PHASE
+};
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+	MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
+	VF_PF_CHANNEL_NOT_READY
+	    /* Writing to VF/PF channel when it is not ready */,
+	VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
+	VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
+	ETH_PACKET_TOO_SMALL
+	    /* TX packet is shorter then reported on BDs or from minimal size */
+	    ,
+	ETH_ILLEGAL_VLAN_MODE
+	    /* Tx packet with marked as insert VLAN when its illegal */,
+	ETH_MTU_VIOLATION /* TX packet is greater then MTU */,
+	ETH_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
+	ETH_VLAN_INSERT_AND_INBAND_VLAN /* Vlan cant be added to inband tag */,
+	ETH_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */
+	    ,
+	ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
+	ETH_INSUFFICIENT_BDS
+	    /* There are not enough BDs for transmission of even one packet */,
+	ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
+	ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
+	ETH_ZERO_SIZE_BD
+	    /* empty BD (which not contains control flags) is illegal  */,
+	ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit  */,
+	ETH_INSUFFICIENT_PAYLOAD
+	    /* TX packet payload is insufficient - TODO confirm exact FW rule */
+	    ,
+	ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
+	ETH_TUNN_IPV6_EXT_NBD_ERR
+	    /* Tunneled packet with IPv6+Ext without a proper number of BDs */,
+	MAX_MALICIOUS_VF_ERROR_ID
+};
+
+/*
+ * Mstorm non-triggering VF zone
+ */
+struct mstorm_non_trigger_vf_zone {
+	struct eth_mstorm_per_queue_stat eth_queue_stat
+	    /* VF statistic bucket */;
+};
+
+/*
+ * Mstorm VF zone
+ */
+struct mstorm_vf_zone {
+	struct mstorm_non_trigger_vf_zone non_trigger
+	    /* non-interrupt-triggering zone */;
+};
+
+/*
+ * personality per PF
+ */
+enum personality_type {
+	BAD_PERSONALITY_TYP,
+	PERSONALITY_ISCSI /* iSCSI and LL2 */,
+	PERSONALITY_FCOE /* Fcoe and LL2 */,
+	PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
+	PERSONALITY_RDMA /* Roce and LL2 */,
+	PERSONALITY_CORE /* CORE(LL2) */,
+	PERSONALITY_ETH /* Ethernet */,
+	PERSONALITY_TOE /* Toe and LL2 */,
+	MAX_PERSONALITY_TYPE
+};
+
+/*
+ * tunnel configuration (PF start)
+ */
+struct pf_start_tunnel_config {
+	u8 set_vxlan_udp_port_flg /* Set VXLAN tunnel UDP destination port. */;
+	u8 set_geneve_udp_port_flg /* Set GENEVE tunnel UDP destination port. */
+	  ;
+	u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+	u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */
+	  ;
+	u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */
+	  ;
+	u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+	u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+	u8 tunnel_clss_l2geneve
+	    /* Classification scheme for l2 GENEVE tunnel. */;
+	u8 tunnel_clss_ipgeneve
+	    /* Classification scheme for ip GENEVE tunnel. */;
+	u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
+	u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+};
+
+/*
+ * Ramrod data for PF start ramrod
+ * (fix: removed a stray lone ';' after base_vf_id - an empty struct
+ * declaration, which is a constraint violation in strict ISO C and is
+ * flagged by -Wpedantic; struct layout is unchanged.)
+ */
+struct pf_start_ramrod_data {
+	struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
+	struct regpair consolid_q_pbl_addr
+	    /* PBL address of consolidation queue */;
+	struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */
+	  ;
+	__le16 event_ring_sb_id /* Status block ID */;
+	u8 base_vf_id /* first VF ID owned by this PF - TODO confirm */;
+	u8 num_vfs /* Amount of vfs owned by PF */;
+	u8 event_ring_num_pages /* Number of PBL pages in event ring */;
+	u8 event_ring_sb_index /* Status block index */;
+	u8 path_id /* HW path ID (engine ID) */;
+	u8 warning_as_error /* In FW asserts, treat warning as error */;
+	u8 dont_log_ramrods
+	    /* If not set - throw a warning for each ramrod (for debug) */;
+	u8 personality /* define what type of personality is new PF */;
+	__le16 log_type_mask;
+	u8 mf_mode /* Multi function mode */;
+	u8 integ_phase /* Integration phase */;
+	u8 allow_npar_tx_switching;
+	u8 inner_to_outer_pri_map[8];
+	u8 pri_map_valid
+	    /* If inner_to_outer_pri_map is initialize then set pri_map_valid */
+	  ;
+	__le32 outer_tag;
+	u8 reserved0[4];
+};
+
+/*
+ * DCB configuration data, per protocol (used in port update ramrod)
+ */
+struct protocol_dcb_data {
+	u8 dcb_enable_flag /* dcbEnable flag value */;
+	u8 dcb_priority /* dcbPri flag value */;
+	u8 dcb_tc /* dcb TC value */;
+	u8 reserved;
+};
+
+/*
+ * tunnel configuration (PF update)
+ */
+struct pf_update_tunnel_config {
+	u8 update_rx_pf_clss;
+	u8 update_tx_pf_clss;
+	u8 set_vxlan_udp_port_flg
+	    /* Update VXLAN tunnel UDP destination port. */;
+	u8 set_geneve_udp_port_flg
+	    /* Update GENEVE tunnel UDP destination port. */;
+	u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+	u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */
+	  ;
+	u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */
+	  ;
+	u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+	u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+	u8 tunnel_clss_l2geneve
+	    /* Classification scheme for l2 GENEVE tunnel. */;
+	u8 tunnel_clss_ipgeneve
+	    /* Classification scheme for ip GENEVE tunnel. */;
+	u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
+	u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+	__le16 reserved[3];
+};
+
+/*
+ * Data for port update ramrod
+ */
+struct pf_update_ramrod_data {
+	u8 pf_id;
+	u8 update_eth_dcb_data_flag /* Update Eth DCB  data indication */;
+	u8 update_fcoe_dcb_data_flag /* Update FCOE DCB  data indication */;
+	u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB  data indication */;
+	u8 update_roce_dcb_data_flag /* Update ROCE DCB  data indication */;
+	u8 update_iwarp_dcb_data_flag /* Update IWARP DCB  data indication */;
+	u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
+	u8 reserved;
+	struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
+	struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
+	struct protocol_dcb_data iscsi_dcb_data /* core iscsi related fields */
+	  ;
+	struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
+	struct protocol_dcb_data iwarp_dcb_data /* core iwarp related fields */
+	  ;
+	__le16 mf_vlan /* new outer vlan id value */;
+	__le16 reserved2;
+	struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */
+	  ;
+};
+
+/*
+ * Ports mode
+ */
+enum ports_mode {
+	ENGX2_PORTX1 /* 2 engines x 1 port */,
+	ENGX2_PORTX2 /* 2 engines x 2 ports */,
+	ENGX1_PORTX1 /* 1 engine  x 1 port */,
+	ENGX1_PORTX2 /* 1 engine  x 2 ports */,
+	ENGX1_PORTX4 /* 1 engine  x 4 ports */,
+	MAX_PORTS_MODE
+};
+
+/*
+ * RDMA TX Stats
+ */
+struct rdma_sent_stats {
+	struct regpair sent_bytes /* number of total RDMA bytes sent */;
+	struct regpair sent_pkts /* number of total RDMA packets sent */;
+};
+
+/*
+ * Pstorm non-triggering VF zone
+ */
+struct pstorm_non_trigger_vf_zone {
+	struct eth_pstorm_per_queue_stat eth_queue_stat
+	    /* VF statistic bucket */;
+	struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
+};
+
+/*
+ * Pstorm VF zone
+ */
+struct pstorm_vf_zone {
+	struct pstorm_non_trigger_vf_zone non_trigger
+	    /* non-interrupt-triggering zone */;
+	struct regpair reserved[7] /* vf_zone size must be power of 2 */;
+};
+
+/*
+ * Ramrod Header of SPQE
+ */
+struct ramrod_header {
+	__le32 cid /* Slowpath Connection CID */;
+	u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+	u8 protocol_id /* Ramrod Protocol ID */;
+	__le16 echo /* Ramrod echo */;
+};
+
+/*
+ * RDMA RX Stats
+ */
+struct rdma_rcv_stats {
+	struct regpair rcv_bytes /* number of total RDMA bytes received */;
+	struct regpair rcv_pkts /* number of total RDMA packets received */;
+};
+
+/*
+ * Slowpath Element (SPQE)
+ */
+struct slow_path_element {
+	struct ramrod_header hdr /* Ramrod Header */;
+	struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
+};
+
+/*
+ * Tstorm non-triggering VF zone
+ */
+struct tstorm_non_trigger_vf_zone {
+	struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
+};
+
+/*
+ * Tstorm per-port discard/irregular-packet counters
+ */
+struct tstorm_per_port_stat {
+	struct regpair trunc_error_discard
+	    /* packet is dropped because it was truncated in NIG */;
+	struct regpair mac_error_discard
+	    /* packet is dropped because of Ethernet FCS error */;
+	struct regpair mftag_filter_discard
+	    /* packet is dropped because classification was unsuccessful */;
+	struct regpair eth_mac_filter_discard;
+	struct regpair ll2_mac_filter_discard;
+	struct regpair ll2_conn_disabled_discard;
+	struct regpair iscsi_irregular_pkt
+	    /* packet is an ISCSI irregular packet */;
+	struct regpair fcoe_irregular_pkt
+	    /* packet is an FCOE irregular packet */;
+	struct regpair roce_irregular_pkt
+	    /* packet is an ROCE irregular packet */;
+	struct regpair eth_irregular_pkt /* packet is an ETH irregular packet */
+	  ;
+	struct regpair toe_irregular_pkt /* packet is an TOE irregular packet */
+	  ;
+	struct regpair preroce_irregular_pkt
+	    /* packet is an PREROCE irregular packet */;
+};
+
+/*
+ * Tstorm VF zone
+ */
+struct tstorm_vf_zone {
+       struct tstorm_non_trigger_vf_zone non_trigger
+           /* non-interrupt-triggering zone */;
+};
+
+/*
+ * Tunnel classification scheme
+ */
+enum tunnel_clss {
+       TUNNEL_CLSS_MAC_VLAN =
+           0
+           /* Use MAC & VLAN from first L2 header for vport classification. */
+           ,
+       TUNNEL_CLSS_MAC_VNI
+           /* Use MAC from first L2 header and VNI for vport classification */
+           ,
+       TUNNEL_CLSS_INNER_MAC_VLAN
+           /* Use MAC and VLAN from last L2 header for vport classification */
+           ,
+       TUNNEL_CLSS_INNER_MAC_VNI
+           /* Use MAC from last L2 header and VNI for vport classification */
+           ,
+       MAX_TUNNEL_CLSS
+};
+
+/*
+ * Ustorm non-triggering VF zone
+ */
+struct ustorm_non_trigger_vf_zone {
+       struct eth_ustorm_per_queue_stat eth_queue_stat
+           /* VF statistic bucket */;
+       struct regpair vf_pf_msg_addr /* VF-PF message address */;
+};
+
+/*
+ * Ustorm triggering VF zone
+ */
+struct ustorm_trigger_vf_zone {
+       u8 vf_pf_msg_valid /* VF-PF message valid flag */;
+       u8 reserved[7];
+};
+
+/*
+ * Ustorm VF zone
+ */
+struct ustorm_vf_zone {
+       struct ustorm_non_trigger_vf_zone non_trigger
+           /* non-interrupt-triggering zone */;
+       struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
+};
+
+/*
+ * VF-PF channel data
+ */
+struct vf_pf_channel_data {
+       __le32 ready;
+       u8 valid;
+       u8 reserved0;
+       __le16 reserved1;
+};
+
+/*
+ * Ramrod data for VF start ramrod
+ */
+struct vf_start_ramrod_data {
+       u8 vf_id /* VF ID */;
+       u8 enable_flr_ack
+           /* if set, FW acks VF FLR initial cleanup - confirm vs HSI spec */;
+       __le16 opaque_fid /* VF opaque FID */;
+       u8 personality /* define what type of personality is new VF */;
+       u8 reserved[3];
+};
+
+/*
+ * Ramrod data for VF stop ramrod
+ */
+struct vf_stop_ramrod_data {
+       u8 vf_id /* VF ID */;
+       u8 reserved0;
+       __le16 reserved1;
+       __le32 reserved2;
+};
+
+/*
+ * Attentions status block
+ */
+struct atten_status_block {
+       __le32 atten_bits;
+       __le32 atten_ack;
+       __le16 reserved0;
+       __le16 sb_index /* status block running index */;
+       __le32 reserved1;
+};
+
+enum block_addr {
+       GRCBASE_GRC = 0x50000,
+       GRCBASE_MISCS = 0x9000,
+       GRCBASE_MISC = 0x8000,
+       GRCBASE_DBU = 0xa000,
+       GRCBASE_PGLUE_B = 0x2a8000,
+       GRCBASE_CNIG = 0x218000,
+       GRCBASE_CPMU = 0x30000,
+       GRCBASE_NCSI = 0x40000,
+       GRCBASE_OPTE = 0x53000,
+       GRCBASE_BMB = 0x540000,
+       GRCBASE_PCIE = 0x54000,
+       GRCBASE_MCP = 0xe00000,
+       GRCBASE_MCP2 = 0x52000,
+       GRCBASE_PSWHST = 0x2a0000,
+       GRCBASE_PSWHST2 = 0x29e000,
+       GRCBASE_PSWRD = 0x29c000,
+       GRCBASE_PSWRD2 = 0x29d000,
+       GRCBASE_PSWWR = 0x29a000,
+       GRCBASE_PSWWR2 = 0x29b000,
+       GRCBASE_PSWRQ = 0x280000,
+       GRCBASE_PSWRQ2 = 0x240000,
+       GRCBASE_PGLCS = 0x0,
+       GRCBASE_DMAE = 0xc000,
+       GRCBASE_PTU = 0x560000,
+       GRCBASE_TCM = 0x1180000,
+       GRCBASE_MCM = 0x1200000,
+       GRCBASE_UCM = 0x1280000,
+       GRCBASE_XCM = 0x1000000,
+       GRCBASE_YCM = 0x1080000,
+       GRCBASE_PCM = 0x1100000,
+       GRCBASE_QM = 0x2f0000,
+       GRCBASE_TM = 0x2c0000,
+       GRCBASE_DORQ = 0x100000,
+       GRCBASE_BRB = 0x340000,
+       GRCBASE_SRC = 0x238000,
+       GRCBASE_PRS = 0x1f0000,
+       GRCBASE_TSDM = 0xfb0000,
+       GRCBASE_MSDM = 0xfc0000,
+       GRCBASE_USDM = 0xfd0000,
+       GRCBASE_XSDM = 0xf80000,
+       GRCBASE_YSDM = 0xf90000,
+       GRCBASE_PSDM = 0xfa0000,
+       GRCBASE_TSEM = 0x1700000,
+       GRCBASE_MSEM = 0x1800000,
+       GRCBASE_USEM = 0x1900000,
+       GRCBASE_XSEM = 0x1400000,
+       GRCBASE_YSEM = 0x1500000,
+       GRCBASE_PSEM = 0x1600000,
+       GRCBASE_RSS = 0x238800,
+       GRCBASE_TMLD = 0x4d0000,
+       GRCBASE_MULD = 0x4e0000,
+       GRCBASE_YULD = 0x4c8000,
+       GRCBASE_XYLD = 0x4c0000,
+       GRCBASE_PRM = 0x230000,
+       GRCBASE_PBF_PB1 = 0xda0000,
+       GRCBASE_PBF_PB2 = 0xda4000,
+       GRCBASE_RPB = 0x23c000,
+       GRCBASE_BTB = 0xdb0000,
+       GRCBASE_PBF = 0xd80000,
+       GRCBASE_RDIF = 0x300000,
+       GRCBASE_TDIF = 0x310000,
+       GRCBASE_CDU = 0x580000,
+       GRCBASE_CCFC = 0x2e0000,
+       GRCBASE_TCFC = 0x2d0000,
+       GRCBASE_IGU = 0x180000,
+       GRCBASE_CAU = 0x1c0000,
+       GRCBASE_UMAC = 0x51000,
+       GRCBASE_XMAC = 0x210000,
+       GRCBASE_DBG = 0x10000,
+       GRCBASE_NIG = 0x500000,
+       GRCBASE_WOL = 0x600000,
+       GRCBASE_BMBN = 0x610000,
+       GRCBASE_IPC = 0x20000,
+       GRCBASE_NWM = 0x800000,
+       GRCBASE_NWS = 0x700000,
+       GRCBASE_MS = 0x6a0000,
+       GRCBASE_PHY_PCIE = 0x620000,
+       GRCBASE_MISC_AEU = 0x8000,
+       GRCBASE_BAR0_MAP = 0x1c00000,
+       MAX_BLOCK_ADDR
+};
+
+enum block_id {
+       BLOCK_GRC,
+       BLOCK_MISCS,
+       BLOCK_MISC,
+       BLOCK_DBU,
+       BLOCK_PGLUE_B,
+       BLOCK_CNIG,
+       BLOCK_CPMU,
+       BLOCK_NCSI,
+       BLOCK_OPTE,
+       BLOCK_BMB,
+       BLOCK_PCIE,
+       BLOCK_MCP,
+       BLOCK_MCP2,
+       BLOCK_PSWHST,
+       BLOCK_PSWHST2,
+       BLOCK_PSWRD,
+       BLOCK_PSWRD2,
+       BLOCK_PSWWR,
+       BLOCK_PSWWR2,
+       BLOCK_PSWRQ,
+       BLOCK_PSWRQ2,
+       BLOCK_PGLCS,
+       BLOCK_DMAE,
+       BLOCK_PTU,
+       BLOCK_TCM,
+       BLOCK_MCM,
+       BLOCK_UCM,
+       BLOCK_XCM,
+       BLOCK_YCM,
+       BLOCK_PCM,
+       BLOCK_QM,
+       BLOCK_TM,
+       BLOCK_DORQ,
+       BLOCK_BRB,
+       BLOCK_SRC,
+       BLOCK_PRS,
+       BLOCK_TSDM,
+       BLOCK_MSDM,
+       BLOCK_USDM,
+       BLOCK_XSDM,
+       BLOCK_YSDM,
+       BLOCK_PSDM,
+       BLOCK_TSEM,
+       BLOCK_MSEM,
+       BLOCK_USEM,
+       BLOCK_XSEM,
+       BLOCK_YSEM,
+       BLOCK_PSEM,
+       BLOCK_RSS,
+       BLOCK_TMLD,
+       BLOCK_MULD,
+       BLOCK_YULD,
+       BLOCK_XYLD,
+       BLOCK_PRM,
+       BLOCK_PBF_PB1,
+       BLOCK_PBF_PB2,
+       BLOCK_RPB,
+       BLOCK_BTB,
+       BLOCK_PBF,
+       BLOCK_RDIF,
+       BLOCK_TDIF,
+       BLOCK_CDU,
+       BLOCK_CCFC,
+       BLOCK_TCFC,
+       BLOCK_IGU,
+       BLOCK_CAU,
+       BLOCK_UMAC,
+       BLOCK_XMAC,
+       BLOCK_DBG,
+       BLOCK_NIG,
+       BLOCK_WOL,
+       BLOCK_BMBN,
+       BLOCK_IPC,
+       BLOCK_NWM,
+       BLOCK_NWS,
+       BLOCK_MS,
+       BLOCK_PHY_PCIE,
+       BLOCK_MISC_AEU,
+       BLOCK_BAR0_MAP,
+       MAX_BLOCK_ID
+};
+
+/*
+ * Igu cleanup bit values to distinguish between clean or producer consumer
+ */
+enum command_type_bit {
+       IGU_COMMAND_TYPE_NOP = 0,
+       IGU_COMMAND_TYPE_SET = 1,
+       MAX_COMMAND_TYPE_BIT
+};
+
+/*
+ * DMAE command
+ */
+struct dmae_cmd {
+       __le32 opcode;
+#define DMAE_CMD_SRC_MASK              0x1
+#define DMAE_CMD_SRC_SHIFT             0
+#define DMAE_CMD_DST_MASK              0x3
+#define DMAE_CMD_DST_SHIFT             1
+#define DMAE_CMD_C_DST_MASK            0x1
+#define DMAE_CMD_C_DST_SHIFT           3
+#define DMAE_CMD_CRC_RESET_MASK        0x1
+#define DMAE_CMD_CRC_RESET_SHIFT       4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
+#define DMAE_CMD_DST_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
+#define DMAE_CMD_COMP_FUNC_MASK        0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT       7
+#define DMAE_CMD_COMP_WORD_EN_MASK     0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT    8
+#define DMAE_CMD_COMP_CRC_EN_MASK      0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT     9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK        0x1
+#define DMAE_CMD_RESERVED1_SHIFT       13
+#define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
+#define DMAE_CMD_ERR_HANDLING_MASK     0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT    16
+#define DMAE_CMD_PORT_ID_MASK          0x3
+#define DMAE_CMD_PORT_ID_SHIFT         18
+#define DMAE_CMD_SRC_PF_ID_MASK        0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT       20
+#define DMAE_CMD_DST_PF_ID_MASK        0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT       24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK        0x3
+#define DMAE_CMD_RESERVED2_SHIFT       30
+       __le32 src_addr_lo
+           /* PCIe source address low in bytes or GRC source address in DW */;
+       __le32 src_addr_hi;
+       __le32 dst_addr_lo;
+       __le32 dst_addr_hi;
+       __le16 length /* Length in DW */;
+       __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK        0xFF
+#define DMAE_CMD_SRC_VF_ID_SHIFT       0
+#define DMAE_CMD_DST_VF_ID_MASK        0xFF
+#define DMAE_CMD_DST_VF_ID_SHIFT       8
+       __le32 comp_addr_lo /* PCIe completion address low or grc address */;
+       __le32 comp_addr_hi;
+       __le32 comp_val /* Value to write to completion address */;
+       __le32 crc32 /* crc32 result */;
+       __le32 crc_32_c /* crc32_c result */;
+       __le16 crc16 /* crc16 result */;
+       __le16 crc16_c /* crc16_c result */;
+       __le16 crc10 /* crc_t10 result */;
+       __le16 reserved;
+       __le16 xsum16 /* checksum16 result  */;
+       __le16 xsum8 /* checksum8 result  */;
+};
+
+struct fw_ver_num {
+       u8 major /* Firmware major version number */;
+       u8 minor /* Firmware minor version number */;
+       u8 rev /* Firmware revision version number */;
+       u8 eng /* Firmware engineering version number (for bootleg versions) */
+         ;
+};
+
+struct fw_ver_info {
+       __le16 tools_ver /* Tools version number */;
+       u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
+       u8 reserved1;
+       struct fw_ver_num num /* FW version number */;
+       __le32 timestamp /* FW Timestamp in unix time  (sec. since 1970) */;
+       __le32 reserved2;
+};
+
+struct storm_ram_section {
+       __le16 offset
+           /* The offset of the section in the RAM (in 64 bit units) */;
+       __le16 size /* The size of the section (in 64 bit units) */;
+};
+
+struct fw_info {
+       struct fw_ver_info ver /* FW version information */;
+       struct storm_ram_section fw_asserts_section
+           /* The FW Asserts offset/size in Storm RAM */;
+       __le32 reserved;
+};
+
+struct fw_info_location {
+       __le32 grc_addr /* GRC address where the fw_info struct is located. */;
+       __le32 size
+           /* Size of the fw_info structure (that is located at grc_addr). */
+         ;
+};
+
+/*
+ * IGU cleanup command
+ */
+struct igu_cleanup {
+       __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT    0
+#define IGU_CLEANUP_CLEANUP_SET_MASK   0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+       __le32 reserved1;
+};
+
+/*
+ * IGU firmware driver command
+ */
+union igu_command {
+       struct igu_prod_cons_update prod_cons_update;
+       struct igu_cleanup cleanup;
+};
+
+/*
+ * IGU firmware driver command
+ */
+struct igu_command_reg_ctrl {
+       __le16 opaque_fid;
+       __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+/*
+ * IGU mapping line structure
+ */
+struct igu_mapping_line {
+       __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK            0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT           0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK         0x1
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT        17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK        0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
+#define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT        24
+};
+
+/*
+ * IGU MSIX line structure
+ */
+struct igu_msix_vector {
+       struct regpair address;
+       __le32 data;
+       __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK      0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT     0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK     0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT    1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK  0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK     0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT    24
+};
+
+enum init_modes {
+       MODE_BB_A0,
+       MODE_BB_B0,
+       MODE_K2,
+       MODE_ASIC,
+       MODE_EMUL_REDUCED,
+       MODE_EMUL_FULL,
+       MODE_FPGA,
+       MODE_CHIPSIM,
+       MODE_SF,
+       MODE_MF_SD,
+       MODE_MF_SI,
+       MODE_PORTS_PER_ENG_1,
+       MODE_PORTS_PER_ENG_2,
+       MODE_PORTS_PER_ENG_4,
+       MODE_100G,
+       MODE_EAGLE_ENG1_WORKAROUND,
+       MAX_INIT_MODES
+};
+
+enum init_phases {
+       PHASE_ENGINE,
+       PHASE_PORT,
+       PHASE_PF,
+       PHASE_VF,
+       PHASE_QM_PF,
+       MAX_INIT_PHASES
+};
+
+struct mstorm_core_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+       __le16 word0 /* word0 */;
+       __le16 word1 /* word1 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+};
+
+/*
+ * per encapsulation type enabling flags
+ */
+struct prs_reg_encapsulation_type_en {
+       u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
+};
+
+enum pxp_tph_st_hint {
+       TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+       TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+       TPH_ST_HINT_TARGET
+           /* Device Write and Host Read, or Host Write and Device Read */,
+       TPH_ST_HINT_TARGET_PRIO,
+       MAX_PXP_TPH_ST_HINT
+};
+
+/*
+ * QM hardware structure of enable bypass credit mask
+ */
+struct qm_rf_bypass_mask {
+       u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK    0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT   0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK  0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK      0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT     2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK      0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT     3
+#define QM_RF_BYPASS_MASK_PFRL_MASK       0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT      4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK    0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT   5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK    0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT   6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK  0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/*
+ * QM hardware structure of opportunistic credit mask
+ */
+struct qm_rf_opportunistic_mask {
+       __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT    0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT    1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK       0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT      2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK       0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT      3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK        0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT       4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT    5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT    6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK   0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT  7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK  0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK   0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT  9
+};
+
+/*
+ * QM hardware structure of QM map memory
+ */
+struct qm_rf_pq_map {
+       __le32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK          0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT         0
+#define QM_RF_PQ_MAP_RL_ID_MASK             0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT            1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK          0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT         9
+#define QM_RF_PQ_MAP_VOQ_MASK               0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT              18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK  0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK          0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT         25
+#define QM_RF_PQ_MAP_RESERVED_MASK          0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT         26
+};
+
+/*
+ * Completion params for aggregated interrupt completion
+ */
+struct sdm_agg_int_comp_params {
+       __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK      0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT     0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK  0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK     0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT    7
+};
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen {
+       __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK   0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT  16
+#define SDM_OP_GEN_RESERVED_MASK    0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT   20
+};
+
+struct ystorm_core_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 word0 /* word0 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le16 word1 /* word1 */;
+       __le16 word2 /* word2 */;
+       __le16 word3 /* word3 */;
+       __le16 word4 /* word4 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+};
+
+#endif /* __ECORE_HSI_COMMON__ */
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
new file mode 100644 (file)
index 0000000..80f4165
--- /dev/null
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_ETH__
+#define __ECORE_HSI_ETH__
+/************************************************************************/
+/* Add include to common eth target for both eCore and protocol driver */
+/************************************************************************/
+#include "eth_common.h"
+
+/*
+ * The eth storm context for the Tstorm
+ */
+struct tstorm_eth_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/*
+ * The eth storm context for the Pstorm
+ */
+struct pstorm_eth_conn_st_ctx {
+       __le32 reserved[8];
+};
+
+/*
+ * The eth storm context for the Xstorm
+ */
+struct xstorm_eth_conn_st_ctx {
+       __le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+       u8 reserved0 /* cdu_validation */;
+       u8 eth_state /* state */;
+       u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK                   0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK            0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK              0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK               0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK                 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK              0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id /* byte2 */;
+       __le16 physical_q0 /* physical_q0 */;
+       __le16 word1 /* physical_q1 */;
+       __le16 edpm_num_bds /* physical_q2 */;
+       __le16 tx_bd_cons /* word3 */;
+       __le16 tx_bd_prod /* word4 */;
+       __le16 go_to_bd_cons /* word5 */;
+       __le16 conn_dpi /* conn_dpi */;
+       u8 byte3 /* byte3 */;
+       u8 byte4 /* byte4 */;
+       u8 byte5 /* byte5 */;
+       u8 byte6 /* byte6 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le32 reg4 /* reg4 */;
+       __le32 reg5 /* cf_array0 */;
+       __le32 reg6 /* cf_array1 */;
+       __le16 word7 /* word7 */;
+       __le16 word8 /* word8 */;
+       __le16 word9 /* word9 */;
+       __le16 word10 /* word10 */;
+       __le32 reg7 /* reg7 */;
+       __le32 reg8 /* reg8 */;
+       __le32 reg9 /* reg9 */;
+       u8 byte7 /* byte7 */;
+       u8 byte8 /* byte8 */;
+       u8 byte9 /* byte9 */;
+       u8 byte10 /* byte10 */;
+       u8 byte11 /* byte11 */;
+       u8 byte12 /* byte12 */;
+       u8 byte13 /* byte13 */;
+       u8 byte14 /* byte14 */;
+       u8 byte15 /* byte15 */;
+       u8 byte16 /* byte16 */;
+       __le16 word11 /* word11 */;
+       __le32 reg10 /* reg10 */;
+       __le32 reg11 /* reg11 */;
+       __le32 reg12 /* reg12 */;
+       __le32 reg13 /* reg13 */;
+       __le32 reg14 /* reg14 */;
+       __le32 reg15 /* reg15 */;
+       __le32 reg16 /* reg16 */;
+       __le32 reg17 /* reg17 */;
+       __le32 reg18 /* reg18 */;
+       __le32 reg19 /* reg19 */;
+       __le16 word12 /* word12 */;
+       __le16 word13 /* word13 */;
+       __le16 word14 /* word14 */;
+       __le16 word15 /* word15 */;
+};
+
+/*
+ * The eth storm context for the Ystorm
+ */
+struct ystorm_eth_conn_st_ctx {
+       __le32 reserved[8] /* 32B opaque to the driver; layout owned by FW */;
+};
+
+/*
+ * Ystorm Ethernet connection aggregative context
+ */
+struct ystorm_eth_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK                  0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK                  0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK     0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT    2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK      0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT     4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK                   0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                  6
+       u8 flags1;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK  0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK   0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT  1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT              3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT              4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT              5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT              6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT              7
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 word0 /* word0 */;
+       __le32 terminate_spqe /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le16 tx_bd_cons_upd /* word1 */;
+       __le16 word2 /* word2 */;
+       __le16 word3 /* word3 */;
+       __le16 word4 /* word4 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+};
+
+/*
+ * Tstorm Ethernet connection aggregative context
+ */
+struct tstorm_eth_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT     0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT     1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT     2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT     3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT     4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT     5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT      6
+       u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT      2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT      4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT      6
+       u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT      2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT      4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT      6
+       u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK      0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT     2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT    4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT    5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT    6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT    7
+       u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT    0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT    1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT    2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT    3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT    4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT    5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK    0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT   6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT  7
+       u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT  0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT  1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT  2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT  3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT  4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK  0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT  6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT  7
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le32 reg4 /* reg4 */;
+       __le32 reg5 /* reg5 */;
+       __le32 reg6 /* reg6 */;
+       __le32 reg7 /* reg7 */;
+       __le32 reg8 /* reg8 */;
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 rx_bd_cons /* word0 */;
+       u8 byte4 /* byte4 */;
+       u8 byte5 /* byte5 */;
+       __le16 rx_bd_prod /* word1 */;
+       __le16 word2 /* conn_dpi */;
+       __le16 word3 /* word3 */;
+       __le32 reg9 /* reg9 */;
+       __le32 reg10 /* reg10 */;
+};
+
+/*
+ * Ustorm Ethernet connection aggregative context
+ */
+struct ustorm_eth_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                   0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                   1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK     0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT    2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK     0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT    4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    6
+       u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK               0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT              2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK               0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT              4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK       0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT      6
+       u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK  0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK  0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK            0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT           4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK            0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT           5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK    0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT   6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                7
+       u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT                7
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 word0 /* conn_dpi */;
+       __le16 tx_bd_cons /* word1 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 tx_int_coallecing_timeset /* reg3 */;
+       __le16 tx_drv_bd_cons /* word2 */;
+       __le16 rx_drv_cqe_cons /* word3 */;
+};
+
+/*
+ * The eth storm context for the Ustorm
+ */
+struct ustorm_eth_conn_st_ctx {
+       __le32 reserved[40] /* 160B opaque to the driver; layout owned by FW */;
+};
+
+/*
+ * The eth storm context for the Mstorm
+ */
+struct mstorm_eth_conn_st_ctx {
+       __le32 reserved[8] /* 32B opaque to the driver; layout owned by FW */;
+};
+
+/*
+ * eth connection context
+ */
+struct eth_conn_context {
+       /* NOTE: field order and padding mirror the FW context image for an
+        * L2 connection — do not reorder or resize any member.
+        */
+       struct tstorm_eth_conn_st_ctx tstorm_st_context
+           /* tstorm storm context */;
+       struct regpair tstorm_st_padding[2] /* padding */;
+       struct pstorm_eth_conn_st_ctx pstorm_st_context
+           /* pstorm storm context */;
+       struct xstorm_eth_conn_st_ctx xstorm_st_context
+           /* xstorm storm context */;
+       struct xstorm_eth_conn_ag_ctx xstorm_ag_context
+           /* xstorm aggregative context */;
+       struct ystorm_eth_conn_st_ctx ystorm_st_context
+           /* ystorm storm context */;
+       struct ystorm_eth_conn_ag_ctx ystorm_ag_context
+           /* ystorm aggregative context */;
+       struct tstorm_eth_conn_ag_ctx tstorm_ag_context
+           /* tstorm aggregative context */;
+       struct ustorm_eth_conn_ag_ctx ustorm_ag_context
+           /* ustorm aggregative context */;
+       struct ustorm_eth_conn_st_ctx ustorm_st_context
+           /* ustorm storm context */;
+       struct mstorm_eth_conn_st_ctx mstorm_st_context
+           /* mstorm storm context */;
+};
+
+/*
+ * Error codes returned by FW for Ethernet configuration ramrods
+ */
+enum eth_error_code {
+       /* NOTE(review): these values appear to be what FW reports in the
+        * 5-bit err_code field of struct eth_return_code — confirm with the
+        * FW interface spec.
+        */
+       ETH_OK = 0x00 /* command succeeded */,
+       ETH_FILTERS_MAC_ADD_FAIL_FULL
+           /* mac add filters command failed due to cam full state */,
+       ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2
+           /* mac add filters command failed due to mtt2 full state */,
+       ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2
+           /* mac add filters command failed due to duplicate mac address */,
+       ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2
+           /* mac add filters command failed due to duplicate mac address */,
+       ETH_FILTERS_MAC_DEL_FAIL_NOF
+           /* mac delete filters command failed due to not found state */,
+       ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2
+           /* mac delete filters command failed due to not found state */,
+       ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2
+           /* mac delete filters command failed due to not found state */,
+       ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC
+           /* mac add filters command failed due to MAC Address of
+            * 00:00:00:00:00:00
+            */
+           ,
+       ETH_FILTERS_VLAN_ADD_FAIL_FULL
+           /* vlan add filters command failed due to cam full state */,
+       ETH_FILTERS_VLAN_ADD_FAIL_DUP
+           /* vlan add filters command failed due to duplicate VLAN filter */,
+       ETH_FILTERS_VLAN_DEL_FAIL_NOF
+           /* vlan delete filters command failed due to not found state */,
+       ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1
+           /* vlan delete filters command failed due to not found state */,
+       ETH_FILTERS_PAIR_ADD_FAIL_DUP
+           /* pair add filters command failed due to duplicate request */,
+       ETH_FILTERS_PAIR_ADD_FAIL_FULL
+           /* pair add filters command failed due to full state */,
+       ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC
+           /* pair add filters command failed due to full state */,
+       ETH_FILTERS_PAIR_DEL_FAIL_NOF
+           /* pair add filters command failed due not found state */,
+       ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1
+           /* pair add filters command failed due not found state */,
+       ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC
+           /* pair add filters command failed due to MAC Address of
+            * 00:00:00:00:00:00
+            */
+           ,
+       ETH_FILTERS_VNI_ADD_FAIL_FULL
+           /* vni add filters command failed due to cam full state */,
+       ETH_FILTERS_VNI_ADD_FAIL_DUP
+           /* vni add filters command failed due to duplicate VNI filter */,
+       MAX_ETH_ERROR_CODE
+};
+
+/*
+ * opcodes for the event ring
+ */
+enum eth_event_opcode {
+       ETH_EVENT_UNUSED /* opcode 0 is reserved/invalid */,
+       ETH_EVENT_VPORT_START,
+       ETH_EVENT_VPORT_UPDATE,
+       ETH_EVENT_VPORT_STOP,
+       ETH_EVENT_TX_QUEUE_START,
+       ETH_EVENT_TX_QUEUE_STOP,
+       ETH_EVENT_RX_QUEUE_START,
+       ETH_EVENT_RX_QUEUE_UPDATE,
+       ETH_EVENT_RX_QUEUE_STOP,
+       ETH_EVENT_FILTERS_UPDATE,
+       ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+       ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+       ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
+       ETH_EVENT_RX_ADD_UDP_FILTER,
+       ETH_EVENT_RX_DELETE_UDP_FILTER,
+       ETH_EVENT_RX_ADD_GFT_FILTER,
+       ETH_EVENT_RX_DELETE_GFT_FILTER,
+       ETH_EVENT_RX_CREATE_GFT_ACTION,
+       MAX_ETH_EVENT_OPCODE
+};
+
+/*
+ * Ethernet filter command actions
+ */
+enum eth_filter_action {
+       ETH_FILTER_ACTION_UNUSED /* action 0 is reserved/invalid */,
+       ETH_FILTER_ACTION_REMOVE /* Remove a single matching filter */,
+       ETH_FILTER_ACTION_ADD /* Add a single filter */,
+       ETH_FILTER_ACTION_REMOVE_ALL
+           /* Remove all filters of given type and vport ID. */,
+       MAX_ETH_FILTER_ACTION
+};
+
+/*
+ * Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd {
+       u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+       u8 vport_id /* the vport id */;
+       u8 action /* filter command action: add/remove/replace */;
+       u8 reserved0;
+       __le32 vni /* VNI, used by the *_VNI_* filter types */;
+       __le16 mac_lsb /* MAC address, least-significant 16 bits */;
+       __le16 mac_mid /* MAC address, middle 16 bits */;
+       __le16 mac_msb /* MAC address, most-significant 16 bits */;
+       __le16 vlan_id /* VLAN ID, used by the *_VLAN_* filter types */;
+};
+
+/*
+ *  $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd_header {
+       u8 rx /* If set, apply these commands to the RX path */;
+       u8 tx /* If set, apply these commands to the TX path */;
+       u8 cmd_cnt /* Number of filter commands */;
+       u8 assert_on_error
+           /* presumably: if set, FW asserts when a filter command fails
+            * instead of just returning an error code — TODO confirm
+            */;
+       u8 reserved1[4];
+};
+
+/*
+ * Ethernet filter types: mac/vlan/pair
+ */
+enum eth_filter_type {
+       ETH_FILTER_TYPE_UNUSED /* type 0 is reserved/invalid */,
+       ETH_FILTER_TYPE_MAC /* Add/remove a MAC address */,
+       ETH_FILTER_TYPE_VLAN /* Add/remove a VLAN */,
+       ETH_FILTER_TYPE_PAIR /* Add/remove a MAC-VLAN pair */,
+       ETH_FILTER_TYPE_INNER_MAC /* Add/remove an inner MAC address */,
+       ETH_FILTER_TYPE_INNER_VLAN /* Add/remove an inner VLAN */,
+       ETH_FILTER_TYPE_INNER_PAIR /* Add/remove an inner MAC-VLAN pair */,
+       ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR /* Add/remove an inner MAC-VNI pair */
+           ,
+       ETH_FILTER_TYPE_MAC_VNI_PAIR /* Add/remove a MAC-VNI pair */,
+       ETH_FILTER_TYPE_VNI /* Add/remove a VNI */,
+       MAX_ETH_FILTER_TYPE
+};
+
+/*
+ * eth IPv4 Fragment Type
+ */
+enum eth_ipv4_frag_type {
+       ETH_IPV4_NOT_FRAG /* IPV4 Packet Not Fragmented */,
+       ETH_IPV4_FIRST_FRAG
+           /* First Fragment of IPv4 Packet (contains headers) */,
+       ETH_IPV4_NON_FIRST_FRAG
+           /* Non-First Fragment of IPv4 Packet (does not contain headers) */,
+       MAX_ETH_IPV4_FRAG_TYPE
+};
+
+/*
+ * eth IP protocol version (IPv4/IPv6)
+ */
+enum eth_ip_type {
+       ETH_IPV4 /* IPv4 */,
+       ETH_IPV6 /* IPv6 */,
+       MAX_ETH_IP_TYPE
+};
+
+/*
+ * Ethernet Ramrod Command IDs
+ */
+enum eth_ramrod_cmd_id {
+       ETH_RAMROD_UNUSED /* command ID 0 is reserved/invalid */,
+       ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+       ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+       ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+       ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+       ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+       ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+       ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+       ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+       ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+       ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION
+           /* RX - Create an Openflow Action */,
+       ETH_RAMROD_RX_ADD_OPENFLOW_FILTER
+           /* RX - Add an Openflow Filter to the Searcher */,
+       ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER
+           /* RX - Delete an Openflow Filter from the Searcher */,
+       ETH_RAMROD_RX_ADD_UDP_FILTER /* RX - Add a UDP Filter to the Searcher */
+           ,
+       ETH_RAMROD_RX_DELETE_UDP_FILTER
+           /* RX - Delete a UDP Filter from the Searcher */,
+       ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a GFT Action */,
+       ETH_RAMROD_RX_DELETE_GFT_FILTER
+           /* RX - Delete a GFT Filter from the Searcher */,
+       ETH_RAMROD_RX_ADD_GFT_FILTER
+           /* RX - Add a GFT Filter to the Searcher */,
+       MAX_ETH_RAMROD_CMD_ID
+};
+
+/*
+ * return code from eth sp ramrods
+ */
+struct eth_return_code {
+       u8 value;
+/* bits 0-4: error code (see enum eth_error_code) */
+#define ETH_RETURN_CODE_ERR_CODE_MASK  0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+/* bits 5-6: reserved */
+#define ETH_RETURN_CODE_RESERVED_MASK  0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+/* bit 7: RX/TX path indication — TODO confirm encoding with FW spec */
+#define ETH_RETURN_CODE_RX_TX_MASK     0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT    7
+};
+
+/*
+ * What to do in case an error occurs
+ */
+enum eth_tx_err {
+       ETH_TX_ERR_DROP /* Drop erroneous packet. */,
+       ETH_TX_ERR_ASSERT_MALICIOUS
+           /* Assert an interrupt for PF, declare as malicious for VF */,
+       MAX_ETH_TX_ERR
+};
+
+/*
+ * Array of the different error type behaviors
+ */
+struct eth_tx_err_vals {
+       /* NOTE(review): each bit presumably selects a behavior from
+        * enum eth_tx_err for the corresponding error class — confirm
+        * with the FW interface spec.
+        */
+       __le16 values;
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK            0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT           0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK             0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT            1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK            0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT           2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK          0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT         3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK  0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK                0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT               5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK        0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT       6
+#define ETH_TX_ERR_VALS_RESERVED_MASK                     0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT                    7
+};
+
+/*
+ * vport rss configuration data
+ */
+struct eth_vport_rss_config {
+       __le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK        0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT       0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK        0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT       1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT   2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT   3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT   4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT   5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK  0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK              0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT             7
+       u8 rss_id /* RSS engine/context ID — TODO confirm with FW spec */;
+       u8 rss_mode /* The RSS mode for this function */;
+       u8 update_rss_key /* if set update the rss key */;
+       u8 update_rss_ind_table /* if set update the indirection table */;
+       u8 update_rss_capabilities /* if set update the capabilities */;
+       u8 tbl_size /* rss mask (Tbl size) */;
+       __le32 reserved2[2];
+       __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]
+           /* RSS indirection table */;
+       __le32 rss_key[ETH_RSS_KEY_SIZE_REGS] /* RSS key supplied to us by OS */
+          ;
+       __le32 reserved3[2];
+};
+
+/*
+ * eth vport RSS mode
+ */
+enum eth_vport_rss_mode {
+       ETH_VPORT_RSS_MODE_DISABLED /* RSS Disabled */,
+       ETH_VPORT_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */,
+       MAX_ETH_VPORT_RSS_MODE
+};
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_rx_mode {
+       /* accept/drop flags for unicast, multicast and broadcast traffic
+        * on the RX path of a vport
+        */
+       __le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT         0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT       1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK  0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT         3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT       4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT       5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK               0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT              6
+       __le16 reserved2[3];
+};
+
+/*
+ * Command for setting tpa parameters
+ */
+struct eth_vport_tpa_param {
+       u8 tpa_ipv4_en_flg /* Enable TPA for IPv4 packets */;
+       u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
+       u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
+       u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
+       u8 tpa_pkt_split_flg
+           /* presumably enables packet header/payload split for TPA —
+            * TODO confirm against FW spec
+            */;
+       u8 tpa_hdr_data_split_flg
+           /* If set, put header of first TPA segment on bd and data on SGE */
+          ;
+       u8 tpa_gro_consistent_flg
+           /* If set, GRO data consistent will checked for TPA continue */;
+       u8 tpa_max_aggs_num
+           /* maximum number of opened aggregations per v-port  */;
+       __le16 tpa_max_size /* maximal size for the aggregated TPA packets */;
+       __le16 tpa_min_size_to_start
+           /* minimum TCP payload size for a packet to start aggregation */;
+       __le16 tpa_min_size_to_cont
+           /* minimum TCP payload size for a packet to continue aggregation */
+          ;
+       u8 max_buff_num
+           /* maximal number of buffers that can be used for one aggregation */
+          ;
+       u8 reserved;
+};
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_tx_mode {
+       /* accept/drop flags for unicast, multicast and broadcast traffic
+        * on the TX path of a vport
+        */
+       __le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK    0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT   0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK    0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT   2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK         0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT        5
+       __le16 reserved2[3];
+};
+
+/*
+ * Ramrod data for rx add gft filter data
+ */
+struct rx_add_gft_filter_data {
+       struct regpair pkt_hdr_addr /* Packet Header That Defines GFT Filter */
+          ;
+       __le16 action_icid /* ICID of Action to run for this filter */;
+       __le16 pkt_hdr_length /* Packet Header Length */;
+       u8 reserved[4];
+};
+
+/*
+ * Ramrod data for rx add openflow filter
+ */
+struct rx_add_openflow_filter_data {
+       __le16 action_icid /* CID of Action to run for this filter */;
+       u8 priority /* Searcher String - Packet priority */;
+       u8 reserved0;
+       __le32 tenant_id /* Searcher String - Tenant ID */;
+       __le16 dst_mac_hi /* Searcher String - Destination Mac Bytes 0 to 1 */;
+       __le16 dst_mac_mid /* Searcher String - Destination Mac Bytes 2 to 3 */
+          ;
+       __le16 dst_mac_lo /* Searcher String - Destination Mac Bytes 4 to 5 */;
+       __le16 src_mac_hi /* Searcher String - Source Mac 0 to 1 */;
+       __le16 src_mac_mid /* Searcher String - Source Mac 2 to 3 */;
+       __le16 src_mac_lo /* Searcher String - Source Mac 4 to 5 */;
+       __le16 vlan_id /* Searcher String - Vlan ID */;
+       __le16 l2_eth_type /* Searcher String - Last L2 Ethertype */;
+       u8 ipv4_dscp /* Searcher String - IPv4 6 MSBs of the TOS Field */;
+       u8 ipv4_frag_type /* Searcher String - IPv4 Fragmentation Type */;
+       u8 ipv4_over_ip /* Searcher String - IPv4 Over IP Type */;
+       u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
+       __le32 ipv4_dst_addr /* Searcher String - IPv4 Destination Address */;
+       __le32 ipv4_src_addr /* Searcher String - IPv4 Source Address */;
+       __le16 l4_dst_port /* Searcher String - TCP/UDP Destination Port */;
+       __le16 l4_src_port /* Searcher String - TCP/UDP Source Port */;
+};
+
+/*
+ * Ramrod data for rx create gft action
+ */
+struct rx_create_gft_action_data {
+       u8 vport_id; /* Vport ID of the GFT action */
+       u8 reserved[7];
+};
+
+/*
+ * Ramrod data for rx create openflow action
+ */
+struct rx_create_openflow_action_data {
+       /* Vport ID of the OpenFlow action (original comment said "ID of RX
+        * queue" - apparent copy-paste; field name indicates a vport ID)
+        */
+       u8 vport_id;
+       u8 reserved[7];
+};
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct rx_queue_start_ramrod_data {
+       __le16 rx_queue_id; /* ID of RX queue */
+       __le16 num_of_pbl_pages; /* Number of pages in the CQE PBL */
+       __le16 bd_max_bytes; /* Maximal bytes that can be placed on the BD */
+       __le16 sb_id; /* Status block ID */
+       u8 sb_index; /* Status block protocol index */
+       u8 vport_id; /* ID of virtual port */
+       u8 default_rss_queue_flg; /* Set queue as default RSS queue if set */
+       u8 complete_cqe_flg; /* Post completion to the CQE ring if set */
+       u8 complete_event_flg; /* Post completion to the event ring if set */
+       u8 stats_counter_id; /* Statistics counter ID */
+       u8 pin_context; /* Pin context in CCFC to improve performance */
+       u8 pxp_tph_valid_bd; /* PXP command TPH Valid - for BD/SGE fetch */
+       u8 pxp_tph_valid_pkt; /* PXP command TPH Valid - for packet placement */
+       /* PXP command steering tag hint. Use enum pxp_tph_st_hint */
+       u8 pxp_st_hint;
+       __le16 pxp_st_index; /* PXP command steering tag index */
+       /* Indicates that the current queue belongs to a poll-mode driver */
+       u8 pmd_mode;
+       u8 notify_en;
+       /* Initial value for the toggle valid bit - used in PMD mode */
+       u8 toggle_val;
+       u8 reserved[7];
+       __le16 reserved1; /* FW reserved */
+       struct regpair cqe_pbl_addr; /* Base address on host of the CQE PBL */
+       struct regpair bd_base; /* BD address of the first BD page */
+       struct regpair reserved2; /* FW reserved */
+};
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct rx_queue_stop_ramrod_data {
+       __le16 rx_queue_id; /* ID of RX queue */
+       u8 complete_cqe_flg; /* Post completion to the CQE ring if set */
+       u8 complete_event_flg; /* Post completion to the event ring if set */
+       u8 vport_id; /* ID of virtual port */
+       u8 reserved[3];
+};
+
+/*
+ * Ramrod data for rx queue update ramrod
+ */
+struct rx_queue_update_ramrod_data {
+       __le16 rx_queue_id; /* ID of RX queue */
+       u8 complete_cqe_flg; /* Post completion to the CQE ring if set */
+       u8 complete_event_flg; /* Post completion to the event ring if set */
+       u8 vport_id; /* ID of virtual port */
+       u8 reserved[4];
+       u8 reserved1; /* FW reserved */
+       u8 reserved2; /* FW reserved */
+       u8 reserved3; /* FW reserved */
+       __le16 reserved4; /* FW reserved */
+       __le16 reserved5; /* FW reserved */
+       struct regpair reserved6; /* FW reserved */
+};
+
+/*
+ * Ramrod data for rx Add UDP Filter
+ */
+struct rx_udp_filter_data {
+       __le16 action_icid; /* CID of the action to run for this filter */
+       __le16 vlan_id; /* Searcher string - VLAN ID */
+       u8 ip_type; /* Searcher string - IP type */
+       u8 tenant_id_exists; /* Searcher string - tenant ID exists */
+       __le16 reserved1;
+       /* Searcher string - IP dest addr; for IPv4 use ip_dst_addr[0] only.
+        * Note: the original code had a stray empty declaration (lone ';')
+        * after this member, which is invalid ISO C (C11 6.7.2.1) and only
+        * compiles as a GCC extension; it has been removed.
+        */
+       __le32 ip_dst_addr[4];
+       /* Searcher string - IP src addr; for IPv4 use ip_src_addr[0] only */
+       __le32 ip_src_addr[4];
+       __le16 udp_dst_port; /* Searcher string - UDP destination port */
+       __le16 udp_src_port; /* Searcher string - UDP source port */
+       __le32 tenant_id; /* Searcher string - tenant ID */
+};
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct tx_queue_start_ramrod_data {
+       __le16 sb_id; /* Status block ID */
+       u8 sb_index; /* Status block protocol index */
+       u8 vport_id; /* VPort ID */
+       u8 reserved0; /* FW reserved */
+       u8 stats_counter_id; /* Statistics counter ID to use */
+       __le16 qm_pq_id; /* QM PQ ID */
+       /* Flags bitmap; bit layout given by the masks/shifts below */
+       u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK  0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT     1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT     2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK               0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT              3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK              0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT             4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK            0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT           5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK              0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT             6
+       u8 pxp_st_hint; /* PXP command steering tag hint */
+       u8 pxp_tph_valid_bd; /* PXP command TPH Valid - for BD fetch */
+       u8 pxp_tph_valid_pkt; /* PXP command TPH Valid - for packet fetch */
+       __le16 pxp_st_index; /* PXP command steering tag index */
+       /* TX completion minimum aggregation size - for PMD queues */
+       __le16 comp_agg_size;
+       __le16 queue_zone_id; /* Queue zone ID to use */
+       __le16 test_dup_count; /* In test mode, number of duplications */
+       __le16 pbl_size; /* Number of BD pages pointed to by the PBL */
+       /* Unique queue ID - currently used only by the PMD flow */
+       __le16 tx_queue_id;
+       struct regpair pbl_base_addr; /* Address of the PBL page */
+       /* BD consumer address in host - for PMD queues */
+       struct regpair bd_cons_address;
+};
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct tx_queue_stop_ramrod_data {
+       __le16 reserved[4]; /* No parameters; all fields reserved */
+};
+
+/*
+ * Ramrod data for vport filter update ramrod
+ */
+struct vport_filter_update_ramrod_data {
+       /* Header for filter commands (RX/TX, Add/Remove/Replace, etc.) */
+       struct eth_filter_cmd_header filter_cmd_hdr;
+       /* Filter commands */
+       struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+/*
+ * Ramrod data for vport start ramrod
+ */
+struct vport_start_ramrod_data {
+       u8 vport_id;
+       u8 sw_fid;
+       __le16 mtu;
+       u8 drop_ttl0_en; /* If set, drop packets with ttl=0 */
+       u8 inner_vlan_removal_en;
+       struct eth_vport_rx_mode rx_mode; /* Rx filter data */
+       struct eth_vport_tx_mode tx_mode; /* Tx filter data */
+       /* TPA configuration parameters */
+       struct eth_vport_tpa_param tpa_param;
+       __le16 default_vlan; /* Default VLAN value to be forced by FW */
+       u8 tx_switching_en; /* Tx switching is enabled for current vport */
+       /* Anti-spoofing verification is set for current vport */
+       u8 anti_spoofing_en;
+       /* If set, the default VLAN value is forced by the FW */
+       u8 default_vlan_en;
+       /* If set, the vport handles PTP Timesync packets */
+       u8 handle_ptp_pkts;
+       u8 silent_vlan_removal_en;
+       /* If enabled, the inner VLAN will be stripped and not written to CQE */
+       u8 untagged;
+       /* Desired behavior per TX error type */
+       struct eth_tx_err_vals tx_err_behav;
+       u8 zero_placement_offset;
+       u8 reserved[7];
+};
+
+/*
+ * Ramrod data for vport stop ramrod
+ */
+struct vport_stop_ramrod_data {
+       u8 vport_id; /* ID of the vport to stop */
+       u8 reserved[7];
+};
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data_cmn {
+       u8 vport_id;
+       u8 update_rx_active_flg; /* Set if rx active flag should be handled */
+       u8 rx_active_flg; /* Rx active flag value */
+       u8 update_tx_active_flg; /* Set if tx active flag should be handled */
+       u8 tx_active_flg; /* Tx active flag value */
+       u8 update_rx_mode_flg; /* Set if rx state data should be handled */
+       u8 update_tx_mode_flg; /* Set if tx state data should be handled */
+       /* Set if approx. mcast data should be handled */
+       u8 update_approx_mcast_flg;
+       u8 update_rss_flg; /* Set if rss data should be handled */
+       /* Set if inner_vlan_removal_en should be handled */
+       u8 update_inner_vlan_removal_en_flg;
+       u8 inner_vlan_removal_en;
+       u8 update_tpa_param_flg;
+       u8 update_tpa_en_flg; /* Set if tpa enable changes */
+       /* Set if tx switching en flag should be handled */
+       u8 update_tx_switching_en_flg;
+       u8 tx_switching_en; /* Tx switching en value */
+       /* Set if anti spoofing flag should be handled */
+       u8 update_anti_spoofing_en_flg;
+       u8 anti_spoofing_en; /* Anti-spoofing verification en value */
+       /* Set if handle_ptp_pkts should be handled */
+       u8 update_handle_ptp_pkts;
+       /* If set, the vport handles PTP Timesync packets */
+       u8 handle_ptp_pkts;
+       /* If set, the default VLAN enable flag is updated */
+       u8 update_default_vlan_en_flg;
+       /* If set, the default VLAN value is forced by the FW */
+       u8 default_vlan_en;
+       /* If set, the default VLAN value is updated */
+       u8 update_default_vlan_flg;
+       __le16 default_vlan; /* Default VLAN value to be forced by FW */
+       /* Set if accept_any_vlan should be handled */
+       u8 update_accept_any_vlan_flg;
+       u8 accept_any_vlan; /* accept_any_vlan updated value */
+       u8 silent_vlan_removal_en;
+       /* If set, MTU will be updated. Vport must not be active. */
+       u8 update_mtu_flg;
+       __le16 mtu; /* New MTU value. Used if update_mtu_flg is set */
+       u8 reserved[2];
+};
+
+/* Approximate multicast filter bins for the vport update ramrod */
+struct vport_update_ramrod_mcast {
+       __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; /* Multicast bins */
+};
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data {
+       /* Common data for all vport update ramrods */
+       struct vport_update_ramrod_data_cmn common;
+       struct eth_vport_rx_mode rx_mode; /* Vport rx mode bitmap */
+       struct eth_vport_tx_mode tx_mode; /* Vport tx mode bitmap */
+       /* TPA configuration parameters */
+       struct eth_vport_tpa_param tpa_param;
+       struct vport_update_ramrod_mcast approx_mcast;
+       struct eth_vport_rss_config rss_config; /* RSS config data */
+};
+
+/*
+ * GFT CAM line struct
+ */
+struct gft_cam_line {
+       /* CAM line bitfield; layout given by the masks/shifts below
+        * (valid bit, 14-bit data, 14-bit mask bits, 3 reserved bits)
+        */
+       __le32 camline;
+#define GFT_CAM_LINE_VALID_MASK      0x1
+#define GFT_CAM_LINE_VALID_SHIFT     0
+#define GFT_CAM_LINE_DATA_MASK       0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT      1
+#define GFT_CAM_LINE_MASK_BITS_MASK  0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK  0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+/*
+ * GFT CAM line struct (for driversim use)
+ */
+struct gft_cam_line_mapped {
+       /* CAM line bitfield with the data/mask portions broken out by meaning;
+        * layout given by the masks/shifts below
+        */
+       __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK                     0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT                    0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK                0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT               1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK         0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT        2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK       0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT      3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK               0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT              7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK                     0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT                    11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK           0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT          15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK    0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT   16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK  0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK          0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT         21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK                0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT               25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK                 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT                29
+};
+
+/* Two views of the same 32-bit GFT CAM line */
+union gft_cam_line_union {
+       struct gft_cam_line cam_line;
+       struct gft_cam_line_mapped cam_line_mapped;
+};
+
+/*
+ * Used in gft_profile_key: Indication for ip version
+ */
+enum gft_profile_ip_version {
+       GFT_PROFILE_IPV4 = 0,
+       GFT_PROFILE_IPV6 = 1,
+       MAX_GFT_PROFILE_IP_VERSION /* Sentinel - number of values */
+};
+
+/*
+ * Profile key struct for GFT logic in Prs
+ */
+struct gft_profile_key {
+       /* Profile key bitfield; layout given by the masks/shifts below */
+       __le16 profile_key;
+#define GFT_PROFILE_KEY_IP_VERSION_MASK           0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT          0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK    0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT   1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK  0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK          0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT         6
+#define GFT_PROFILE_KEY_PF_ID_MASK                0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT               10
+#define GFT_PROFILE_KEY_RESERVED0_MASK            0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT           14
+};
+
+/*
+ * Used in gft_profile_key: Indication for tunnel type
+ */
+enum gft_profile_tunnel_type {
+       GFT_PROFILE_NO_TUNNEL = 0,
+       GFT_PROFILE_VXLAN_TUNNEL = 1,
+       GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2,
+       GFT_PROFILE_GRE_IP_TUNNEL = 3,
+       GFT_PROFILE_GENEVE_MAC_TUNNEL = 4,
+       GFT_PROFILE_GENEVE_IP_TUNNEL = 5,
+       MAX_GFT_PROFILE_TUNNEL_TYPE /* Sentinel - number of values */
+};
+
+/*
+ * Used in gft_profile_key: Indication for protocol type
+ */
+enum gft_profile_upper_protocol_type {
+       GFT_PROFILE_ROCE_PROTOCOL = 0,
+       GFT_PROFILE_RROCE_PROTOCOL = 1,
+       GFT_PROFILE_FCOE_PROTOCOL = 2,
+       GFT_PROFILE_ICMP_PROTOCOL = 3,
+       GFT_PROFILE_ARP_PROTOCOL = 4,
+       GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+       GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+       GFT_PROFILE_TCP_PROTOCOL = 7,
+       GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+       GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+       GFT_PROFILE_UDP_PROTOCOL = 10,
+       GFT_PROFILE_USER_IP_1_INNER = 11,
+       GFT_PROFILE_USER_IP_2_OUTER = 12,
+       GFT_PROFILE_USER_ETH_1_INNER = 13,
+       GFT_PROFILE_USER_ETH_2_OUTER = 14,
+       GFT_PROFILE_RAW = 15,
+       MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE /* Sentinel - number of values */
+};
+
+/*
+ * GFT RAM line struct
+ */
+struct gft_ram_line {
+       /* Lower 32 bits of the RAM line; each bit (except VLAN_SELECT, 2 bits)
+        * selects one header field for matching - layout per the masks/shifts
+        * below
+        */
+       __le32 low32bits;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK              0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT             0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK          0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT         2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK     0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT    3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK               0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT              4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK         0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT        5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK          0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT         6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK          0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT         7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK              0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT             8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK  0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK            0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT           10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK            0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT           11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK          0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT         12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK     0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT    13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK              0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT             14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK           0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT          15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK           0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT          16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK            0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT           17
+#define GFT_RAM_LINE_TTL_MASK                      0x1
+#define GFT_RAM_LINE_TTL_SHIFT                     18
+#define GFT_RAM_LINE_ETHERTYPE_MASK                0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT               19
+#define GFT_RAM_LINE_RESERVED0_MASK                0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT               20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT            21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT            22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT            23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT            24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT            25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT            26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT            27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT            28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK              0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT             29
+#define GFT_RAM_LINE_DST_PORT_MASK                 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT                30
+#define GFT_RAM_LINE_SRC_PORT_MASK                 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT                31
+       /* Upper 32 bits of the RAM line; layout per the masks/shifts below */
+       __le32 high32bits;
+#define GFT_RAM_LINE_DSCP_MASK                     0x1
+#define GFT_RAM_LINE_DSCP_SHIFT                    0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK         0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT        1
+#define GFT_RAM_LINE_DST_IP_MASK                   0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT                  2
+#define GFT_RAM_LINE_SRC_IP_MASK                   0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT                  3
+#define GFT_RAM_LINE_PRIORITY_MASK                 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT                4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK            0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT           5
+#define GFT_RAM_LINE_VLAN_MASK                     0x1
+#define GFT_RAM_LINE_VLAN_SHIFT                    6
+#define GFT_RAM_LINE_DST_MAC_MASK                  0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT                 7
+#define GFT_RAM_LINE_SRC_MAC_MASK                  0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT                 8
+#define GFT_RAM_LINE_TENANT_ID_MASK                0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT               9
+#define GFT_RAM_LINE_RESERVED1_MASK                0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT               10
+};
+
+/*
+ * Used in the first 2 bits for gft_ram_line: Indication for vlan mask
+ */
+enum gft_vlan_select {
+       INNER_PROVIDER_VLAN = 0,
+       INNER_VLAN = 1,
+       OUTER_PROVIDER_VLAN = 2,
+       OUTER_VLAN = 3,
+       MAX_GFT_VLAN_SELECT /* Sentinel - number of values */
+};
+
+/* Mstorm Ethernet connection aggregation context (auto-generated layout;
+ * field names are positional, firmware meaning noted in the comments)
+ */
+struct mstorm_eth_conn_ag_ctx {
+       u8 byte0; /* cdu_validation */
+       u8 byte1; /* state */
+       u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK          0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK           0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK           0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK           0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+       u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK         0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK         0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK         0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+       __le16 word0; /* word0 */
+       __le16 word1; /* word1 */
+       __le32 reg0; /* reg0 */
+       __le32 reg1; /* reg1 */
+};
+
+/* @DPDK: xstormEthConnAgCtxDqExtLdPart */
+struct xstorm_eth_conn_ag_ctx_dq_ext_ld_part {
+       u8 reserved0 /* cdu_validation */;
+       u8 eth_state /* state */;
+       u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK          0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK                   0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK            0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK                0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK              0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK               0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK         0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK             0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK          0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK                 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK                 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK                 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK                 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK          0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK              0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id /* byte2 */;
+       __le16 physical_q0 /* physical_q0 */;
+       __le16 word1 /* physical_q1 */;
+       __le16 edpm_num_bds /* physical_q2 */;
+       __le16 tx_bd_cons /* word3 */;
+       __le16 tx_bd_prod /* word4 */;
+       __le16 go_to_bd_cons /* word5 */;
+       __le16 conn_dpi /* conn_dpi */;
+       u8 byte3 /* byte3 */;
+       u8 byte4 /* byte4 */;
+       u8 byte5 /* byte5 */;
+       u8 byte6 /* byte6 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le32 reg4 /* reg4 */;
+};
+
+/*
+ * Xstorm Ethernet HW connection aggregation context.
+ * This mirrors the device's context image: field order, widths and the
+ * flags0..flags14 bit packing are part of the hardware ABI - do not
+ * reorder or repack.  Each single- or two-bit field inside a flagsN byte
+ * is accessed via its *_MASK / *_SHIFT pair defined below.
+ */
+struct xstorm_eth_hw_conn_ag_ctx {
+       u8 reserved0 /* cdu_validation */;
+       u8 eth_state /* state */;
+       u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+       /* flags2..flags6: 2-bit completion-flag (CF) fields, 4 per byte */
+       u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK                   0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK            0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK                0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK              0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK               0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+       /* flags8..flags10: per-CF enable bits (CFxEN) */
+       u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+       /* flags11..flags13: rule enable bits */
+       u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+       /* flags14: EDPM (Ethernet Direct Packet Mode) control bits */
+       u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK              0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id /* byte2 */;
+       __le16 physical_q0 /* physical_q0 */;
+       __le16 word1 /* physical_q1 */;
+       __le16 edpm_num_bds /* physical_q2 */;
+       __le16 tx_bd_cons /* word3 */;
+       __le16 tx_bd_prod /* word4 */;
+       __le16 go_to_bd_cons /* word5 */;
+       __le16 conn_dpi /* conn_dpi */;
+};
+
+#endif /* __ECORE_HSI_ETH__ */
diff --git a/drivers/net/qede/base/ecore_hsi_tools.h b/drivers/net/qede/base/ecore_hsi_tools.h
new file mode 100644 (file)
index 0000000..18eea76
--- /dev/null
@@ -0,0 +1,1081 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_TOOLS__
+#define __ECORE_HSI_TOOLS__
+/**********************************/
+/* Tools HSI constants and macros */
+/**********************************/
+
+/*********************************** Init ************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS                  23
+#define MAX_GRC_ADDR                   ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID                   0xffff
+
+/* init pattern size in bytes */
+#define INIT_PATTERN_SIZE_BITS 4
+#define MAX_INIT_PATTERN_SIZE  (1 << INIT_PATTERN_SIZE_BITS)
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE                        8192
+
+/* Global PXP window */
+#define NUM_OF_PXP_WIN                 19
+#define PXP_WIN_DWORD_SIZE_BITS        10
+#define PXP_WIN_DWORD_SIZE             (1 << PXP_WIN_DWORD_SIZE_BITS)
+#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
+#define PXP_WIN_BYTE_SIZE              (PXP_WIN_DWORD_SIZE * 4)
+
+/********************************* GRC Dump **********************************/
+
+/* width of GRC dump register sequence length in bits */
+#define DUMP_SEQ_LEN_BITS                      8
+#define DUMP_SEQ_LEN_MAX_VAL           ((1 << DUMP_SEQ_LEN_BITS) - 1)
+
+/* width of GRC dump memory length in bits */
+#define DUMP_MEM_LEN_BITS                      18
+#define DUMP_MEM_LEN_MAX_VAL           ((1 << DUMP_MEM_LEN_BITS) - 1)
+
+/* width of register type ID in bits */
+#define REG_TYPE_ID_BITS                       6
+#define REG_TYPE_ID_MAX_VAL                    ((1 << REG_TYPE_ID_BITS) - 1)
+
+/* width of block ID in bits */
+#define BLOCK_ID_BITS                          8
+#define BLOCK_ID_MAX_VAL                       ((1 << BLOCK_ID_BITS) - 1)
+
+/******************************** Idle Check *********************************/
+
+/* max number of idle check predicate immediates */
+#define MAX_IDLE_CHK_PRED_IMM          3
+
+/* max number of idle check argument registers */
+#define MAX_IDLE_CHK_READ_REGS         3
+
+/* max number of idle check loops */
+#define MAX_IDLE_CHK_LOOPS                     0x10000
+
+/* max idle check address increment */
+#define MAX_IDLE_CHK_INCREMENT         0x10000
+
+/* indicates an undefined idle check line index */
+#define IDLE_CHK_UNDEFINED_LINE_IDX    0xffffff
+
+/* max number of register values following the idle check header for LSI */
+#define IDLE_CHK_MAX_LSI_DUMP_REGS     2
+
+/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
+#define IDLE_CHK_QM_RD_WR_PTR          0
+#define IDLE_CHK_QM_RD_WR_BANK         1
+
+/**************************************/
+/* HSI Functions constants and macros */
+/**************************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES                 8
+
+/* the MCP Trace meta data signature is duplicated in the
+ * perl script that generates the NVRAM images
+ */
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
+/* Maximal number of RAM lines occupied by FW Asserts data */
+#define MAX_FW_ASSERTS_RAM_LINES               800
+
+/*
+ * Binary buffer header.
+ * Locates one buffer (see enum bin_buffer_type) inside the FW binary
+ * image file; all values are little-endian on-media.
+ */
+struct bin_buffer_hdr {
+       __le32 offset
+           /* buffer offset in bytes from the beginning of the binary file */;
+       __le32 length /* buffer length in bytes */;
+};
+
+/*
+ * binary buffer types.
+ * NOTE: enumerator values are positional and presumably index the buffer
+ * table inside the binary file - do not reorder or insert in the middle.
+ */
+enum bin_buffer_type {
+       BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
+       BIN_BUF_INIT_CMD /* init commands */,
+       BIN_BUF_INIT_VAL /* init data */,
+       BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+       BIN_BUF_IRO /* internal RAM offsets array */,
+       MAX_BIN_BUFFER_TYPE
+};
+
+/*
+ * Chip IDs.
+ * NOTE: values are positional; reordering would change the numeric IDs.
+ */
+enum chip_ids {
+       CHIP_BB_A0 /* BB A0 chip ID */,
+       CHIP_BB_B0 /* BB B0 chip ID */,
+       CHIP_K2 /* AH chip ID */,
+       MAX_CHIP_IDS
+};
+
+/*
+ * memory dump descriptor.
+ * Fields are packed into dword0/dword1 and accessed via the _MASK/_SHIFT
+ * pairs below (dword0: address[23:0], asic chip mask[27:24], sim chip
+ * mask[31:28]; dword1: length[17:0], reg type id[23:18], block id[31:24]).
+ */
+struct dbg_dump_mem_desc {
+       __le32 dword0;
+#define DBG_DUMP_MEM_DESC_ADDRESS_MASK         0xFFFFFF
+#define DBG_DUMP_MEM_DESC_ADDRESS_SHIFT        0
+#define DBG_DUMP_MEM_DESC_ASIC_CHIP_MASK_MASK  0xF
+#define DBG_DUMP_MEM_DESC_ASIC_CHIP_MASK_SHIFT 24
+#define DBG_DUMP_MEM_DESC_SIM_CHIP_MASK_MASK   0xF
+#define DBG_DUMP_MEM_DESC_SIM_CHIP_MASK_SHIFT  28
+       __le32 dword1;
+#define DBG_DUMP_MEM_DESC_LENGTH_MASK          0x3FFFF
+#define DBG_DUMP_MEM_DESC_LENGTH_SHIFT         0
+#define DBG_DUMP_MEM_DESC_REG_TYPE_ID_MASK     0x3F
+#define DBG_DUMP_MEM_DESC_REG_TYPE_ID_SHIFT    18
+#define DBG_DUMP_MEM_DESC_BLOCK_ID_MASK        0xFF
+#define DBG_DUMP_MEM_DESC_BLOCK_ID_SHIFT       24
+};
+
+/*
+ * registers dump descriptor: chip.
+ * One variant of union dbg_dump_regs_desc; bit 0 (IS_CHIP_MASK)
+ * distinguishes this form from the raw/seq variants.
+ */
+struct dbg_dump_regs_chip_desc {
+       __le32 data;
+#define DBG_DUMP_REGS_CHIP_DESC_IS_CHIP_MASK_MASK    0x1
+#define DBG_DUMP_REGS_CHIP_DESC_IS_CHIP_MASK_SHIFT   0
+#define DBG_DUMP_REGS_CHIP_DESC_ASIC_CHIP_MASK_MASK  0x7FFFFF
+#define DBG_DUMP_REGS_CHIP_DESC_ASIC_CHIP_MASK_SHIFT 1
+#define DBG_DUMP_REGS_CHIP_DESC_SIM_CHIP_MASK_MASK   0xFF
+#define DBG_DUMP_REGS_CHIP_DESC_SIM_CHIP_MASK_SHIFT  24
+};
+
+/*
+ * registers dump descriptor: raw.
+ * Generic variant of union dbg_dump_regs_desc; param1/param2 are
+ * interpreted by the consumer according to the IS_CHIP_MASK bit.
+ */
+struct dbg_dump_regs_raw_desc {
+       __le32 data;
+#define DBG_DUMP_REGS_RAW_DESC_IS_CHIP_MASK_MASK  0x1
+#define DBG_DUMP_REGS_RAW_DESC_IS_CHIP_MASK_SHIFT 0
+#define DBG_DUMP_REGS_RAW_DESC_PARAM1_MASK        0x7FFFFF
+#define DBG_DUMP_REGS_RAW_DESC_PARAM1_SHIFT       1
+#define DBG_DUMP_REGS_RAW_DESC_PARAM2_MASK        0xFF
+#define DBG_DUMP_REGS_RAW_DESC_PARAM2_SHIFT       24
+};
+
+/*
+ * registers dump descriptor: sequence.
+ * Variant of union dbg_dump_regs_desc describing a run of registers:
+ * start address (in dwords, GRC space) and length of the sequence.
+ */
+struct dbg_dump_regs_seq_desc {
+       __le32 data;
+#define DBG_DUMP_REGS_SEQ_DESC_IS_CHIP_MASK_MASK  0x1
+#define DBG_DUMP_REGS_SEQ_DESC_IS_CHIP_MASK_SHIFT 0
+#define DBG_DUMP_REGS_SEQ_DESC_ADDRESS_MASK       0x7FFFFF
+#define DBG_DUMP_REGS_SEQ_DESC_ADDRESS_SHIFT      1
+#define DBG_DUMP_REGS_SEQ_DESC_LENGTH_MASK        0xFF
+#define DBG_DUMP_REGS_SEQ_DESC_LENGTH_SHIFT       24
+};
+
+/*
+ * registers dump descriptor.
+ * All three variants overlay the same __le32; bit 0 (IS_CHIP_MASK) of
+ * each variant selects the chip form vs. the raw/seq forms.
+ */
+union dbg_dump_regs_desc {
+       struct dbg_dump_regs_raw_desc raw /* dumped registers raw descriptor */
+          ;
+       struct dbg_dump_regs_seq_desc seq /* dumped registers seq descriptor */
+          ;
+       struct dbg_dump_regs_chip_desc chip
+           /* dumped registers chip descriptor */;
+};
+
+/*
+ * idle check macro types.
+ * NOTE: values are positional and stored in idle_chk_rule.data
+ * (MACRO_TYPE field) - do not reorder.
+ */
+enum idle_chk_macro_types {
+       IDLE_CHK_MACRO_TYPE_COMPARE /* parametric register comparison */,
+       IDLE_CHK_MACRO_TYPE_QM_RD_WR /* compare QM r/w pointers and banks */,
+       MAX_IDLE_CHK_MACRO_TYPES
+};
+
+/*
+ * Idle Check result header.
+ * Emitted per failed idle-check rule; register values follow the header
+ * (up to IDLE_CHK_MAX_LSI_DUMP_REGS LSI values - see constant above).
+ */
+struct idle_chk_result_hdr {
+       __le16 rule_idx /* Idle check rule index in CSV file */;
+       __le16 loop_idx /* the loop index in which the failure occurred */;
+       /* number of FW register values following the header -
+        * NOTE(review): inferred from name; confirm against the dump parser.
+        */
+       __le16 num_fw_values;
+       __le16 data;
+#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_MASK  0xF
+#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_SHIFT 0
+#define IDLE_CHK_RESULT_HDR_LOOP_VALID_MASK      0x1
+#define IDLE_CHK_RESULT_HDR_LOOP_VALID_SHIFT     4
+#define IDLE_CHK_RESULT_HDR_SEVERITY_MASK        0x7
+#define IDLE_CHK_RESULT_HDR_SEVERITY_SHIFT       5
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_MASK      0xF
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_SHIFT     8
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_MASK  0xF
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_SHIFT 12
+};
+
+/*
+ * Idle Check rule.
+ * The macro type / severity fields correspond to enum
+ * idle_chk_macro_types and enum idle_chk_severity_types.
+ */
+struct idle_chk_rule {
+       __le32 data;
+#define IDLE_CHK_RULE_ASIC_CHIP_MASK_MASK  0xF
+#define IDLE_CHK_RULE_ASIC_CHIP_MASK_SHIFT 0
+#define IDLE_CHK_RULE_SIM_CHIP_MASK_MASK   0xF
+#define IDLE_CHK_RULE_SIM_CHIP_MASK_SHIFT  4
+#define IDLE_CHK_RULE_BLOCK_ID_MASK        0xFF
+#define IDLE_CHK_RULE_BLOCK_ID_SHIFT       8
+#define IDLE_CHK_RULE_MACRO_TYPE_MASK      0xF
+#define IDLE_CHK_RULE_MACRO_TYPE_SHIFT     16
+#define IDLE_CHK_RULE_SEVERITY_MASK        0x7
+#define IDLE_CHK_RULE_SEVERITY_SHIFT       20
+#define IDLE_CHK_RULE_RESERVED_MASK        0x1
+#define IDLE_CHK_RULE_RESERVED_SHIFT       23
+#define IDLE_CHK_RULE_PRED_ID_MASK         0xFF
+#define IDLE_CHK_RULE_PRED_ID_SHIFT        24
+       __le16 loop;
+       __le16 increment
+           /* address increment of first argument register on each iteration */
+          ;
+       /* argument register GRC addresses; array size matches
+        * MAX_IDLE_CHK_READ_REGS (3) defined above.
+        */
+       __le32 reg_addr[3];
+       /* array size matches MAX_IDLE_CHK_PRED_IMM (3) defined above */
+       __le32 pred_imm[3]
+           /* immediate values passed as arguments to the idle check rule */;
+};
+
+/*
+ * idle check severity types.
+ * NOTE: values are positional and stored in the rule/result SEVERITY
+ * bitfields - do not reorder.
+ */
+enum idle_chk_severity_types {
+       IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
+       IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC
+           /* failure is an error only when there is no traffic */,
+       IDLE_CHK_SEVERITY_WARNING
+           /* idle check failure should cause a warning */,
+       MAX_IDLE_CHK_SEVERITY_TYPES
+};
+
+/*
+ * init array header: raw.
+ * Generic form of union init_array_hdr; TYPE (low nibble, see enum
+ * init_array_types) selects how PARAMS is interpreted.
+ */
+struct init_array_raw_hdr {
+       __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK    0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT   0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK  0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+/*
+ * init array header: standard.
+ * Variant of union init_array_hdr for INIT_ARR_STANDARD; SIZE is the
+ * array size (presumably in dwords - confirm against the init tool).
+ */
+struct init_array_standard_hdr {
+       __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK  0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: zipped.
+ * Variant of union init_array_hdr for INIT_ARR_ZIPPED; ZIPPED_SIZE is
+ * bounded by MAX_ZIPPED_SIZE (dwords) defined above.
+ */
+struct init_array_zipped_hdr {
+       __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK         0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT        0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: pattern.
+ * Variant of union init_array_hdr for INIT_ARR_PATTERN: a pattern of
+ * PATTERN_SIZE (max MAX_INIT_PATTERN_SIZE bytes) repeated REPETITIONS
+ * times.
+ */
+struct init_array_pattern_hdr {
+       __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK          0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT         0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK  0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK   0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT  8
+};
+
+/*
+ * init array header union.
+ * All variants overlay the same __le32; the TYPE nibble (bits 3:0,
+ * identical position in every variant) selects which one applies.
+ */
+union init_array_hdr {
+       struct init_array_raw_hdr raw /* raw init array header */;
+       struct init_array_standard_hdr standard /* standard init array header */
+          ;
+       struct init_array_zipped_hdr zipped /* zipped init array header */;
+       struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+/*
+ * init array types.
+ * NOTE: values are positional and stored in the init array header TYPE
+ * field - do not reorder.
+ */
+enum init_array_types {
+       INIT_ARR_STANDARD /* standard init array */,
+       INIT_ARR_ZIPPED /* zipped init array */,
+       INIT_ARR_PATTERN /* a repeated pattern */,
+       MAX_INIT_ARRAY_TYPES
+};
+
+/*
+ * init operation: callback.
+ * Invokes a driver callback (identified by callback_id) during the init
+ * sequence; the OP nibble identifies this op type in the command stream.
+ */
+struct init_callback_op {
+       __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK        0xF
+#define INIT_CALLBACK_OP_OP_SHIFT       0
+#define INIT_CALLBACK_OP_RESERVED_MASK  0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+       __le16 callback_id /* Callback ID */;
+       __le16 block_id /* Block ID */;
+};
+
+/*
+ * init operation: delay.
+ * Pauses the init sequence for the given number of microseconds.
+ */
+struct init_delay_op {
+       __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK        0xF
+#define INIT_DELAY_OP_OP_SHIFT       0
+#define INIT_DELAY_OP_RESERVED_MASK  0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+       __le32 delay /* delay in us */;
+};
+
+/*
+ * init operation: if_mode.
+ * Conditionally skips ahead in the init command stream based on a modes
+ * expression; CMD_OFFSET is the jump target (presumably a command-stream
+ * offset relative to this op - confirm against the init engine).
+ */
+struct init_if_mode_op {
+       __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK          0xF
+#define INIT_IF_MODE_OP_OP_SHIFT         0
+#define INIT_IF_MODE_OP_RESERVED1_MASK   0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT  4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK  0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+       __le16 reserved2;
+       __le16 modes_buf_offset
+           /* offset (in bytes) in modes expression buffer */;
+};
+
+/*
+ * init operation: if_phase.
+ * Conditionally skips ahead in the init command stream based on the
+ * current init phase; PHASE_ID of ANY_PHASE_ID (0xffff, defined above)
+ * matches any phase.
+ */
+struct init_if_phase_op {
+       __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK           0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT          0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK  0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK    0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT   5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK   0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT  16
+       __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK        0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT       0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK    0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT   8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK     0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT    16
+};
+
+/*
+ * init mode operators
+ */
+enum init_mode_ops {
+       INIT_MODE_OP_NOT /* init mode not operator */,
+       INIT_MODE_OP_OR /* init mode or operator */,
+       INIT_MODE_OP_AND /* init mode and operator */,
+       MAX_INIT_MODE_OPS
+};
+
+/*
+ * init operation: raw
+ */
+struct init_raw_op {
+       __le32 op_data;
+#define INIT_RAW_OP_OP_MASK      0xF
+#define INIT_RAW_OP_OP_SHIFT     0
+#define INIT_RAW_OP_PARAM1_MASK  0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+       __le32 param2 /* Init param 2 */;
+};
+
+/*
+ * init array params
+ */
+struct init_op_array_params {
+       __le16 size /* array size in dwords */;
+       __le16 offset /* array start offset in dwords */;
+};
+
+/*
+ * Write init operation arguments
+ */
+union init_write_args {
+       __le32 inline_val
+           /* value to write, used when init source is INIT_SRC_INLINE */;
+       __le32 zeros_count;
+       __le32 array_offset
+           /* array offset to write, used when init source is INIT_SRC_ARRAY */
+          ;
+       struct init_op_array_params runtime;
+};
+
+/*
+ * init operation: write
+ */
+struct init_write_op {
+       __le32 data;
+#define INIT_WRITE_OP_OP_MASK        0xF
+#define INIT_WRITE_OP_OP_SHIFT       0
+#define INIT_WRITE_OP_SOURCE_MASK    0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT   4
+#define INIT_WRITE_OP_RESERVED_MASK  0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK  0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK   0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT  9
+       union init_write_args args /* Write init operation arguments */;
+};
+
+/*
+ * init operation: read
+ */
+struct init_read_op {
+       __le32 op_data;
+#define INIT_READ_OP_OP_MASK         0xF
+#define INIT_READ_OP_OP_SHIFT        0
+#define INIT_READ_OP_POLL_TYPE_MASK  0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK   0x1
+#define INIT_READ_OP_RESERVED_SHIFT  8
+#define INIT_READ_OP_ADDRESS_MASK    0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT   9
+       __le32 expected_val
+           /* expected polling value, used only when polling is done */;
+};
+
+/*
+ * Init operations union
+ */
+union init_op {
+       struct init_raw_op raw /* raw init operation */;
+       struct init_write_op write /* write init operation */;
+       struct init_read_op read /* read init operation */;
+       struct init_if_mode_op if_mode /* if_mode init operation */;
+       struct init_if_phase_op if_phase /* if_phase init operation */;
+       struct init_callback_op callback /* callback init operation */;
+       struct init_delay_op delay /* delay init operation */;
+};
+
+/*
+ * Init command operation types
+ */
+enum init_op_types {
+       INIT_OP_READ /* GRC read init command */,
+       INIT_OP_WRITE /* GRC write init command */,
+       INIT_OP_IF_MODE
+           /* Skip init commands if the init modes expression doesn't match */,
+       INIT_OP_IF_PHASE
+           /* Skip init commands if the init phase doesn't match */,
+       INIT_OP_DELAY /* delay init command */,
+       INIT_OP_CALLBACK /* callback init command */,
+       MAX_INIT_OP_TYPES
+};
+
+/*
+ * init polling types
+ */
+enum init_poll_types {
+       INIT_POLL_NONE /* No polling */,
+       INIT_POLL_EQ /* polling until equals the specified value */,
+       INIT_POLL_OR /* polling until all the bits of the specified value are set */,
+       INIT_POLL_AND /* polling until no bits of the specified value are cleared */,
+       MAX_INIT_POLL_TYPES
+};
+
+/*
+ * init source types
+ */
+enum init_source_types {
+       INIT_SRC_INLINE /* init value is included in the init command */,
+       INIT_SRC_ZEROS /* init value is all zeros */,
+       INIT_SRC_ARRAY /* init value is an array of values */,
+       INIT_SRC_RUNTIME /* init value is provided during runtime */,
+       MAX_INIT_SOURCE_TYPES
+};
+
+/*
+ * Internal RAM Offsets macro data
+ */
+struct iro {
+       __le32 base /* RAM field offset */;
+       __le16 m1 /* multiplier 1 */;
+       __le16 m2 /* multiplier 2 */;
+       __le16 m3 /* multiplier 3 */;
+       __le16 size /* RAM field size */;
+};
+
+/*
+ * register descriptor
+ */
+struct reg_desc {
+       __le32 data;
+#define REG_DESC_ADDRESS_MASK  0xFFFFFF
+#define REG_DESC_ADDRESS_SHIFT 0
+#define REG_DESC_SIZE_MASK     0xFF
+#define REG_DESC_SIZE_SHIFT    24
+};
+
+/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block_data {
+       u8 enabled /* Indicates if the block is enabled for recording (0/1) */;
+       u8 hw_id /* HW ID associated with the block */;
+       u8 line_num /* Debug line number to select */;
+       u8 right_shift /* Number of units to right-shift the debug data (0-3) */;
+       u8 cycle_en /* 4-bit value: bit i set -> unit i is enabled. */;
+       u8 force_valid /* 4-bit value: bit i set -> unit i is forced valid. */;
+       u8 force_frame
+           /* 4-bit value: bit i set -> unit i frame bit is forced. */;
+       u8 reserved;
+};
+
+/*
+ * Debug Bus Clients
+ */
+enum dbg_bus_clients {
+       DBG_BUS_CLIENT_RBCN,
+       DBG_BUS_CLIENT_RBCP,
+       DBG_BUS_CLIENT_RBCR,
+       DBG_BUS_CLIENT_RBCT,
+       DBG_BUS_CLIENT_RBCU,
+       DBG_BUS_CLIENT_RBCF,
+       DBG_BUS_CLIENT_RBCX,
+       DBG_BUS_CLIENT_RBCS,
+       DBG_BUS_CLIENT_RBCH,
+       DBG_BUS_CLIENT_RBCZ,
+       DBG_BUS_CLIENT_OTHER_ENGINE,
+       DBG_BUS_CLIENT_TIMESTAMP,
+       DBG_BUS_CLIENT_CPU,
+       DBG_BUS_CLIENT_RBCY,
+       DBG_BUS_CLIENT_RBCQ,
+       DBG_BUS_CLIENT_RBCM,
+       DBG_BUS_CLIENT_RBCB,
+       DBG_BUS_CLIENT_RBCW,
+       DBG_BUS_CLIENT_RBCV,
+       MAX_DBG_BUS_CLIENTS
+};
+
+/*
+ * Debug Bus constraint operation types
+ */
+enum dbg_bus_constraint_ops {
+       DBG_BUS_CONSTRAINT_OP_EQ /* equal */,
+       DBG_BUS_CONSTRAINT_OP_NE /* not equal */,
+       DBG_BUS_CONSTRAINT_OP_LT /* less than */,
+       DBG_BUS_CONSTRAINT_OP_LTC /* less than (cyclic) */,
+       DBG_BUS_CONSTRAINT_OP_LE /* less than or equal */,
+       DBG_BUS_CONSTRAINT_OP_LEC /* less than or equal (cyclic) */,
+       DBG_BUS_CONSTRAINT_OP_GT /* greater than */,
+       DBG_BUS_CONSTRAINT_OP_GTC /* greater than (cyclic) */,
+       DBG_BUS_CONSTRAINT_OP_GE /* greater than or equal */,
+       DBG_BUS_CONSTRAINT_OP_GEC /* greater than or equal (cyclic) */,
+       MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+/*
+ * Debug Bus memory address
+ */
+struct dbg_bus_mem_addr {
+       __le32 lo;
+       __le32 hi;
+};
+
+/*
+ * Debug Bus PCI buffer data
+ */
+struct dbg_bus_pci_buf_data {
+       struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
+       struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
+       __le32 size /* PCI buffer size in bytes */;
+};
+
+/*
+ * Debug Bus Storm EID range filter params
+ */
+struct dbg_bus_storm_eid_range_params {
+       u8 min /* Minimal event ID to filter on */;
+       u8 max /* Maximal event ID to filter on */;
+};
+
+/*
+ * Debug Bus Storm EID mask filter params
+ */
+struct dbg_bus_storm_eid_mask_params {
+       u8 val /* Event ID value */;
+       u8 mask /* Event ID mask. 1s in the mask = dont care bits. */;
+};
+
+/*
+ * Debug Bus Storm EID filter params
+ */
+union dbg_bus_storm_eid_params {
+       struct dbg_bus_storm_eid_range_params range
+           /* EID range filter params */;
+       struct dbg_bus_storm_eid_mask_params mask /* EID mask filter params */;
+};
+
+/*
+ * Debug Bus Storm data
+ */
+struct dbg_bus_storm_data {
+       u8 fast_enabled;
+       u8 fast_mode
+           /* Fast debug Storm mode, valid only if fast_enabled is set */;
+       u8 slow_enabled;
+       u8 slow_mode
+           /* Slow debug Storm mode, valid only if slow_enabled is set */;
+       u8 hw_id /* HW ID associated with the Storm */;
+       u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
+       u8 eid_range_not_mask;
+       u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
+       union dbg_bus_storm_eid_params eid_filter_params;
+       __le16 reserved;
+       __le32 cid /* CID to filter on. Valid only if cid_filter_en is set. */;
+};
+
+/*
+ * Debug Bus data
+ */
+struct dbg_bus_data {
+       __le32 app_version /* The tools version number of the application */;
+       u8 state /* The current debug bus state */;
+       u8 hw_dwords /* HW dwords per cycle */;
+       u8 next_hw_id /* Next HW ID to be associated with an input */;
+       u8 num_enabled_blocks /* Number of blocks enabled for recording */;
+       u8 num_enabled_storms /* Number of Storms enabled for recording */;
+       u8 target /* Output target */;
+       u8 next_trigger_state /* ID of next trigger state to be added */;
+       u8 next_constraint_id
+           /* ID of next filter/trigger constraint to be added */;
+       u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
+       u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
+       u8 timestamp_input_en
+           /* Indicates if timestamp recording is enabled (0/1) */;
+       u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
+       u8 trigger_en /* Indicates if the recording trigger is enabled (0/1) */
+          ;
+       u8 adding_filter;
+       u8 filter_pre_trigger;
+       u8 filter_post_trigger;
+       u8 unify_inputs;
+       u8 rcv_from_other_engine;
+       struct dbg_bus_pci_buf_data pci_buf;
+       __le16 reserved;
+       struct dbg_bus_block_data blocks[80] /* Debug Bus data for each block */
+          ;
+       struct dbg_bus_storm_data storms[6] /* Debug Bus data for each Storm */
+          ;
+};
+
+/*
+ * Debug bus filter types
+ */
+enum dbg_bus_filter_types {
+       DBG_BUS_FILTER_TYPE_OFF /* filter always off */,
+       DBG_BUS_FILTER_TYPE_PRE /* filter before trigger only */,
+       DBG_BUS_FILTER_TYPE_POST /* filter after trigger only */,
+       DBG_BUS_FILTER_TYPE_ON /* filter always on */,
+       MAX_DBG_BUS_FILTER_TYPES
+};
+
+/*
+ * Debug bus frame modes
+ */
+enum dbg_bus_frame_modes {
+       DBG_BUS_FRAME_MODE_0HW_4ST = 0 /* 0 HW dwords, 4 Storm dwords */,
+       DBG_BUS_FRAME_MODE_4HW_0ST = 3 /* 4 HW dwords, 0 Storm dwords */,
+       DBG_BUS_FRAME_MODE_8HW_0ST = 4 /* 8 HW dwords, 0 Storm dwords */,
+       MAX_DBG_BUS_FRAME_MODES
+};
+
+/*
+ * Debug bus input types
+ */
+enum dbg_bus_input_types {
+       DBG_BUS_INPUT_TYPE_STORM,
+       DBG_BUS_INPUT_TYPE_BLOCK,
+       MAX_DBG_BUS_INPUT_TYPES
+};
+
+/*
+ * Debug bus other engine mode
+ */
+enum dbg_bus_other_engine_modes {
+       DBG_BUS_OTHER_ENGINE_MODE_NONE,
+       DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+       DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+       DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+       DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+       MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+/*
+ * Debug bus post-trigger recording types
+ */
+enum dbg_bus_post_trigger_types {
+       DBG_BUS_POST_TRIGGER_RECORD /* start recording after trigger */,
+       DBG_BUS_POST_TRIGGER_DROP /* drop data after trigger */,
+       MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+/*
+ * Debug bus pre-trigger recording types
+ */
+enum dbg_bus_pre_trigger_types {
+       DBG_BUS_PRE_TRIGGER_START_FROM_ZERO /* start recording from time 0 */,
+       DBG_BUS_PRE_TRIGGER_NUM_CHUNKS
+           /* start recording some chunks before trigger */,
+       DBG_BUS_PRE_TRIGGER_DROP /* drop data before trigger */,
+       MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+/*
+ * Debug bus SEMI frame modes
+ */
+enum dbg_bus_semi_frame_modes {
+       DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
+           0 /* 0 slow dwords, 4 fast dwords */,
+       DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
+           3 /* 4 slow dwords, 0 fast dwords */,
+       MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
+/*
+ * Debug bus states
+ */
+enum dbg_bus_states {
+       DBG_BUS_STATE_BEFORE_RECORD /* before the debug bus recording starts */
+           ,
+       DBG_BUS_STATE_DURING_RECORD /* during debug bus recording */,
+       DBG_BUS_STATE_AFTER_RECORD /* after debug bus recording */,
+       MAX_DBG_BUS_STATES
+};
+
+/*
+ * Debug Bus Storm modes
+ */
+enum dbg_bus_storm_modes {
+       DBG_BUS_STORM_MODE_PRINTF /* store data (fast debug) */,
+       DBG_BUS_STORM_MODE_PRAM_ADDR /* pram address (fast debug) */,
+       DBG_BUS_STORM_MODE_DRA_RW /* DRA read/write data (fast debug) */,
+       DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */,
+       DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */,
+       DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */,
+       DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */,
+       DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */,
+       DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */,
+       MAX_DBG_BUS_STORM_MODES
+};
+
+/*
+ * Debug bus target IDs
+ */
+enum dbg_bus_targets {
+       DBG_BUS_TARGET_ID_INT_BUF
+           /* records debug bus to DBG block internal buffer */,
+       DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */,
+       DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */,
+       MAX_DBG_BUS_TARGETS
+};
+
+/*
+ * GRC Dump data
+ */
+struct dbg_grc_data {
+       u8 is_updated /* Indicates if the GRC Dump data is updated (0/1) */;
+       u8 chip_id /* Chip ID */;
+       u8 chip_mask /* Chip mask */;
+       u8 reserved;
+       __le32 max_dump_dwords /* Max GRC Dump size in dwords */;
+       __le32 param_val[40];
+       u8 param_set_by_user[40];
+};
+
+/*
+ * Debug GRC params
+ */
+enum dbg_grc_params {
+       DBG_GRC_PARAM_DUMP_TSTORM /* dump Tstorm memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_MSTORM /* dump Mstorm memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_USTORM /* dump Ustorm memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_XSTORM /* dump Xstorm memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_YSTORM /* dump Ystorm memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_PSTORM /* dump Pstorm memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_REGS /* dump non-memory registers (0/1) */,
+       DBG_GRC_PARAM_DUMP_RAM /* dump Storm internal RAMs (0/1) */,
+       DBG_GRC_PARAM_DUMP_PBUF /* dump Storm passive buffer (0/1) */,
+       DBG_GRC_PARAM_DUMP_IOR /* dump Storm IORs (0/1) */,
+       DBG_GRC_PARAM_DUMP_VFC /* dump VFC memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_CM_CTX /* dump CM contexts (0/1) */,
+       DBG_GRC_PARAM_DUMP_PXP /* dump PXP memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_RSS /* dump RSS memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */,
+       DBG_GRC_PARAM_RESERVED /* reserved */,
+       DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_DMAE /* dump DMAE memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_TM /* dump TM (timers) memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_SDM /* dump SDM memories (0/1) */,
+       DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */,
+       DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */,
+       DBG_GRC_PARAM_NUM_LCIDS /* number of LCIDs (0..320) */,
+       DBG_GRC_PARAM_NUM_LTIDS /* number of LTIDs (0..320) */,
+       DBG_GRC_PARAM_EXCLUDE_ALL
+           /* preset: exclude all memories from dump (1 only) */,
+       DBG_GRC_PARAM_CRASH
+           /* preset: include memories for crash dump (1 only) */,
+       DBG_GRC_PARAM_PARITY_SAFE
+           /* perform dump only if MFW is responding (0/1) */,
+       DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
+       MAX_DBG_GRC_PARAMS
+};
+
+/*
+ * Debug reset registers
+ */
+enum dbg_reset_regs {
+       DBG_RESET_REG_MISCS_PL_UA,
+       DBG_RESET_REG_MISCS_PL_HV,
+       DBG_RESET_REG_MISC_PL_UA,
+       DBG_RESET_REG_MISC_PL_HV,
+       DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+       DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+       DBG_RESET_REG_MISC_PL_PDA_VAUX,
+       MAX_DBG_RESET_REGS
+};
+
+/*
+ * @DPDK Debug status codes
+ */
+enum dbg_status {
+       DBG_STATUS_OK,
+       DBG_STATUS_APP_VERSION_NOT_SET,
+       DBG_STATUS_UNSUPPORTED_APP_VERSION,
+       DBG_STATUS_DBG_BLOCK_NOT_RESET,
+       DBG_STATUS_INVALID_ARGS,
+       DBG_STATUS_OUTPUT_ALREADY_SET,
+       DBG_STATUS_INVALID_PCI_BUF_SIZE,
+       DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+       DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+       DBG_STATUS_TOO_MANY_INPUTS,
+       DBG_STATUS_INPUT_OVERLAP,
+       DBG_STATUS_HW_ONLY_RECORDING,
+       DBG_STATUS_STORM_ALREADY_ENABLED,
+       DBG_STATUS_STORM_NOT_ENABLED,
+       DBG_STATUS_BLOCK_ALREADY_ENABLED,
+       DBG_STATUS_BLOCK_NOT_ENABLED,
+       DBG_STATUS_NO_INPUT_ENABLED,
+       DBG_STATUS_NO_FILTER_TRIGGER_64B,
+       DBG_STATUS_FILTER_ALREADY_ENABLED,
+       DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+       DBG_STATUS_TRIGGER_NOT_ENABLED,
+       DBG_STATUS_CANT_ADD_CONSTRAINT,
+       DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+       DBG_STATUS_TOO_MANY_CONSTRAINTS,
+       DBG_STATUS_RECORDING_NOT_STARTED,
+       DBG_STATUS_NO_DATA_TRIGGERED,
+       DBG_STATUS_NO_DATA_RECORDED,
+       DBG_STATUS_DUMP_BUF_TOO_SMALL,
+       DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+       DBG_STATUS_UNKNOWN_CHIP,
+       DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+       DBG_STATUS_BLOCK_IN_RESET,
+       DBG_STATUS_INVALID_TRACE_SIGNATURE,
+       DBG_STATUS_INVALID_NVRAM_BUNDLE,
+       DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+       DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+       DBG_STATUS_NVRAM_READ_FAILED,
+       DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+       DBG_STATUS_MCP_TRACE_BAD_DATA,
+       DBG_STATUS_MCP_TRACE_NO_META,
+       DBG_STATUS_MCP_COULD_NOT_HALT,
+       DBG_STATUS_MCP_COULD_NOT_RESUME,
+       DBG_STATUS_DMAE_FAILED,
+       DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+       DBG_STATUS_IGU_FIFO_BAD_DATA,
+       DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+       DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+       DBG_STATUS_REG_FIFO_BAD_DATA,
+       DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+       MAX_DBG_STATUS
+};
+
+/*
+ * Debug Storms IDs
+ */
+enum dbg_storms {
+       DBG_TSTORM_ID,
+       DBG_MSTORM_ID,
+       DBG_USTORM_ID,
+       DBG_XSTORM_ID,
+       DBG_YSTORM_ID,
+       DBG_PSTORM_ID,
+       MAX_DBG_STORMS
+};
+
+/*
+ * Idle Check data
+ */
+struct idle_chk_data {
+       __le32 buf_size /* Idle check buffer size in dwords */;
+       u8 buf_size_set
+           /* Indicates if the idle check buffer size was set (0/1) */;
+       u8 reserved1;
+       __le16 reserved2;
+};
+
+/*
+ * MCP Trace data
+ */
+struct mcp_trace_data {
+       __le32 buf_size /* MCP Trace buffer size in dwords */;
+       u8 buf_size_set
+           /* Indicates if the MCP Trace buffer size was set (0/1) */;
+       u8 reserved1;
+       __le16 reserved2;
+};
+
+/*
+ * Debug Tools data (per HW function)
+ */
+struct dbg_tools_data {
+       struct dbg_grc_data grc /* GRC Dump data */;
+       struct dbg_bus_data bus /* Debug Bus data */;
+       struct idle_chk_data idle_chk /* Idle Check data */;
+       struct mcp_trace_data mcp_trace /* MCP Trace data */;
+       u8 block_in_reset[80] /* Indicates if a block is in reset state (0/1) */
+          ;
+       u8 chip_id /* Chip ID (from enum chip_ids) */;
+       u8 chip_mask
+           /* Chip mask = bit index chip_id is set, the rest are cleared */;
+       u8 initialized /* Indicates if the data was initialized */;
+       u8 reset_state_updated
+           /* Indicates if blocks reset state is updated (0/1) */;
+};
+
+/*
+ * BRB RAM init requirements
+ */
+struct init_brb_ram_req {
+       __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
+       __le32 headroom_per_tc /* headroom size per TC, in bytes */;
+       __le32 min_pkt_size /* min packet size, in bytes */;
+       __le32 max_ports_per_engine /* max number of ports per engine */;
+       u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
+};
+
+/*
+ * ETS per-TC init requirements
+ */
+struct init_ets_tc_req {
+       u8 use_sp;
+       u8 use_wfq;
+       __le16 weight /* An arbitration weight. Valid only if use_wfq is set. */
+          ;
+};
+
+/*
+ * ETS init requirements
+ */
+struct init_ets_req {
+       __le32 mtu /* Max packet size (in bytes) */;
+       struct init_ets_tc_req tc_req[NUM_OF_TCS]
+           /* ETS initialization requirements per TC. */;
+};
+
+/*
+ * NIG LB RL init requirements
+ */
+struct init_nig_lb_rl_req {
+       __le16 lb_mac_rate;
+       __le16 lb_rate;
+       __le32 mtu /* Max packet size (in bytes) */;
+       __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+/*
+ * NIG TC mapping for each priority
+ */
+struct init_nig_pri_tc_map_entry {
+       u8 tc_id /* the mapped TC ID */;
+       u8 valid /* indicates if the mapping entry is valid */;
+};
+
+/*
+ * NIG priority to TC map init requirements
+ */
+struct init_nig_pri_tc_map_req {
+       struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
+/*
+ * QM per-port init parameters
+ */
+struct init_qm_port_params {
+       u8 active /* Indicates if this port is active */;
+       u8 num_active_phys_tcs /* number of physical TCs used by this port */;
+       __le16 num_pbf_cmd_lines
+           /* number of PBF command lines that can be used by this port */;
+       __le16 num_btb_blocks
+           /* number of BTB blocks that can be used by this port */;
+       __le16 reserved;
+};
+
+/*
+ * QM per-PQ init parameters
+ */
+struct init_qm_pq_params {
+       u8 vport_id /* VPORT ID */;
+       u8 tc_id /* TC ID */;
+       u8 wrr_group /* WRR group */;
+       u8 reserved;
+};
+
+/*
+ * QM per-vport init parameters
+ */
+struct init_qm_vport_params {
+       __le32 vport_rl;
+       __le16 vport_wfq;
+       __le16 first_tx_pq_id[NUM_OF_TCS]
+           /* the first Tx PQ ID associated with this VPORT for each TC. */;
+};
+
+#endif /* __ECORE_HSI_TOOLS__ */
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
new file mode 100644 (file)
index 0000000..1c48ed0
--- /dev/null
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+#include "ecore_utils.h"
+
+#ifndef ASIC_ONLY
+#define ECORE_EMUL_FACTOR 2000
+#define ECORE_FPGA_FACTOR 200
+#endif
+
+#define ECORE_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define ECORE_BAR_INVALID_OFFSET               -1
+
+/* A PTT window - maps one fixed external BAR window onto an arbitrary
+ * internal (dword) address range via the PXP PTT table.
+ */
+struct ecore_ptt {
+       osal_list_entry_t list_entry;   /* node on the pool's free list */
+       unsigned int idx;               /* index of this PTT in the PXP table */
+       struct pxp_ptt_entry pxp;       /* host shadow of the HW PTT entry */
+};
+
+/* Per-hwfn pool of PTT windows */
+struct ecore_ptt_pool {
+       osal_list_t free_list;          /* PTTs available for acquisition */
+       osal_spinlock_t lock;           /* protects free_list */
+       struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+/* Allocate and initialize the hwfn's PTT pool.  Every entry starts with
+ * an invalid offset so the first use forces the HW window to be
+ * programmed.  Returns ECORE_NOMEM on allocation failure.
+ */
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ptt_pool *p_pool;
+       int i;
+
+       p_pool = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                           sizeof(struct ecore_ptt_pool));
+       if (!p_pool)
+               return ECORE_NOMEM;
+
+       OSAL_LIST_INIT(&p_pool->free_list);
+       for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+               p_pool->ptts[i].idx = i;
+               p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
+               p_pool->ptts[i].pxp.pretend.control = 0;
+
+               /* There are special PTT entries that are taken only by design.
+                * The rest are added to the list for general usage.
+                */
+               if (i >= RESERVED_PTT_MAX)
+                       OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
+                                           &p_pool->free_list);
+       }
+
+       p_hwfn->p_ptt_pool = p_pool;
+       /* NOTE(review): OSAL_SPIN_LOCK_ALLOC result is not checked here;
+        * presumably it cannot fail in this OSAL - confirm.
+        */
+       OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
+       OSAL_SPIN_LOCK_INIT(&p_pool->lock);
+
+       return ECORE_SUCCESS;
+}
+
+/* Invalidate the cached offset of every PTT so that the next
+ * ecore_ptt_set_win() on any of them rewrites the HW entry instead of
+ * trusting a stale shadow value.
+ */
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ptt *p_ptt;
+       int i;
+
+       for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+               p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+               p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
+       }
+}
+
+/* Free the PTT pool; the spinlock is deallocated only if the pool was
+ * actually allocated, and the pointer is cleared to prevent reuse.
+ */
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
+{
+       if (p_hwfn->p_ptt_pool)
+               OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
+       p_hwfn->p_ptt_pool = OSAL_NULL;
+}
+
+/* Acquire a free PTT from the pool, polling for up to
+ * ECORE_BAR_ACQUIRE_TIMEOUT milliseconds.  Returns OSAL_NULL on
+ * timeout.  May sleep (OSAL_MSLEEP) - do not call from atomic context.
+ */
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ptt *p_ptt;
+       unsigned int i;
+
+       /* Take the free PTT from the list */
+       for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
+               OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+               if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list))
+                       break;  /* exits the loop with the lock still held */
+               OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+               OSAL_MSLEEP(1);
+       }
+
+       /* We should not time-out, but it can happen... --> Lock isn't held */
+       if (i == ECORE_BAR_ACQUIRE_TIMEOUT) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate PTT\n");
+               return OSAL_NULL;
+       }
+
+       /* Lock is held from the break above; pop the first free entry. */
+       p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
+                                     struct ecore_ptt, list_entry);
+       OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
+                              &p_hwfn->p_ptt_pool->free_list);
+       OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "allocated ptt %d\n", p_ptt->idx);
+
+       return p_ptt;
+}
+
+/* Return a previously acquired PTT to the pool's free list. */
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       /* This PTT should not be set to pretend if it is being released */
+
+       OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+       OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+       OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+}
+
+/* Return the internal byte address currently mapped by this PTT. */
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       /* The HW is using DWORDS and we need to translate it to Bytes */
+       return p_ptt->pxp.offset << 2;
+}
+
+/* Address of this PTT's entry within the PXP admin window. */
+static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
+{
+       return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+           p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+/* BAR offset of the external window served by this PTT. */
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
+{
+       return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+           p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+/* Re-point the PTT window at new_hw_addr (a byte address).  Both the
+ * host shadow and the HW PTT entry are updated; a no-op if the window
+ * already maps that address.
+ */
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt, u32 new_hw_addr)
+{
+       u32 prev_hw_addr;
+
+       prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+       if (new_hw_addr == prev_hw_addr)
+               return;
+
+       /* Update PTT entry in admin window */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "Updating PTT entry %d to offset 0x%x\n",
+                  p_ptt->idx, new_hw_addr);
+
+       /* The HW is using DWORDS and the address is in Bytes */
+       p_ptt->pxp.offset = new_hw_addr >> 2;
+
+       REG_WR(p_hwfn,
+              ecore_ptt_config_addr(p_ptt) +
+              OFFSETOF(struct pxp_ptt_entry, offset), p_ptt->pxp.offset);
+}
+
+/* Translate an internal address to a BAR address reachable through
+ * p_ptt, moving the PTT window first if hw_addr falls outside the
+ * currently mapped range.
+ */
+static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
+                        struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+       u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+       u32 offset;
+
+       offset = hw_addr - win_hw_addr;
+
+       /* Verify the address is within the window */
+       if (hw_addr < win_hw_addr ||
+           offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+               ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+               offset = 0;
+       }
+
+       return ecore_ptt_get_bar_addr(p_ptt) + offset;
+}
+
+/* Return one of the design-reserved PTT entries (indices below
+ * RESERVED_PTT_MAX, which are never placed on the free list), or
+ * OSAL_NULL if ptt_idx is out of range.
+ */
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+                                        enum reserved_ptts ptt_idx)
+{
+       if (ptt_idx >= RESERVED_PTT_MAX) {
+               DP_NOTICE(p_hwfn, true,
+                         "Requested PTT %d is out of range\n", ptt_idx);
+               return OSAL_NULL;
+       }
+
+       return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+/* Write a 32-bit value to internal address hw_addr through the PTT
+ * window (repositioning the window if needed).
+ */
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+             struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
+{
+       u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+       REG_WR(p_hwfn, bar_addr, val);
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+                  bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+       /* Emulation/FPGA platforms are slow; give the access time to land */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+               OSAL_UDELAY(100);
+#endif
+}
+
+/* Read a 32-bit value from internal address hw_addr through the PTT
+ * window (repositioning the window if needed).
+ */
+u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+       u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+       u32 val = REG_RD(p_hwfn, bar_addr);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+                  bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+       /* Emulation/FPGA platforms are slow; give the access time to land */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+               OSAL_UDELAY(100);
+#endif
+
+       return val;
+}
+
+/* Copy n bytes between host memory (addr) and the internal address
+ * space (hw_addr), in window-sized chunks, dword by dword.
+ * to_device selects the direction: true = host -> HW, false = HW -> host.
+ * NOTE(review): dw_count = quota / 4 drops any tail of n % 4 bytes;
+ * callers are assumed to pass dword-multiple sizes and dword-aligned
+ * host buffers - TODO confirm.
+ */
+static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           void *addr,
+                           u32 hw_addr, osal_size_t n, bool to_device)
+{
+       u32 dw_count, *host_addr, hw_offset;
+       osal_size_t quota, done = 0;
+       u32 OSAL_IOMEM *reg_addr;
+
+       while (done < n) {
+               /* Limit each chunk to one PTT window's span */
+               quota = OSAL_MIN_T(osal_size_t, n - done,
+                                  PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+               ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+               hw_offset = ecore_ptt_get_bar_addr(p_ptt);
+
+               dw_count = quota / 4;
+               host_addr = (u32 *)((u8 *)addr + done);
+               reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);
+
+               if (to_device)
+                       while (dw_count--)
+                               DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
+               else
+                       while (dw_count--)
+                               *host_addr++ = DIRECT_REG_RD(p_hwfn,
+                                                            reg_addr++);
+
+               done += quota;
+       }
+}
+
+/* Read n bytes from internal address hw_addr into the host buffer
+ * dest.  Size/alignment constraints are those of ecore_memcpy_hw().
+ */
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt,
+                      void *dest, u32 hw_addr, osal_size_t n)
+{
+       /* Log format fixed: the original printed hw_addr twice */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "hw_addr 0x%x, dest %p, size %lu\n",
+                  hw_addr, dest, (unsigned long)n);
+
+       ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+/* Write n bytes from the host buffer src to internal address hw_addr.
+ * Size/alignment constraints are those of ecore_memcpy_hw().
+ */
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt,
+                    u32 hw_addr, void *src, osal_size_t n)
+{
+       /* Log format fixed: the original printed hw_addr twice */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "hw_addr 0x%x, src %p, size %lu\n",
+                  hw_addr, src, (unsigned long)n);
+
+       ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+/* Make all accesses through this PTT pretend to come from function fid
+ * (a concrete FID).  If the FID's VF-valid bit is clear, only the PF id
+ * part is used.  Undo with another pretend call; a pretend also clears
+ * any previous port pretend.
+ */
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt, u16 fid)
+{
+       void *p_pretend;
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+       /* Every pretend undos prev pretends, including previous port pretend */
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+       /* HW register field is little-endian */
+       p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+       if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+               fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+       p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
+
+       p_pretend = &p_ptt->pxp.pretend;
+       REG_WR(p_hwfn,
+              ecore_ptt_config_addr(p_ptt) +
+              OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
+}
+
+/* Make the PTT window pretend to another port when accessing the BAR. */
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+			struct ecore_ptt *p_ptt, u8 port_id)
+{
+	void *p_pretend;
+	u16 control = 0;
+
+	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+	/* Fix: convert to little-endian like ecore_fid_pretend() does; the
+	 * pretend control is a HW field and storing host order breaks on
+	 * big-endian hosts.
+	 */
+	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+	p_pretend = &p_ptt->pxp.pretend;
+	REG_WR(p_hwfn,
+	       ecore_ptt_config_addr(p_ptt) +
+	       OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
+}
+
+/* Cancel any previously configured port pretend on this PTT window. */
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+	void *p_pretend;
+	u16 control = 0;
+
+	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+	/* Fix: convert to little-endian like ecore_fid_pretend() does; the
+	 * pretend control is a HW field and storing host order breaks on
+	 * big-endian hosts.
+	 */
+	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+	p_pretend = &p_ptt->pxp.pretend;
+	REG_WR(p_hwfn,
+	       ecore_ptt_config_addr(p_ptt) +
+	       OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
+}
+
+/* Build the concrete FID for the given VF, on top of this hwfn's PF id. */
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
+{
+	u32 concrete = 0;
+
+	SET_FIELD(concrete, PXP_CONCRETE_FID_VFVALID, 1);
+	SET_FIELD(concrete, PXP_CONCRETE_FID_VFID, vfid);
+	SET_FIELD(concrete, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+
+	return concrete;
+}
+
+/* Not in use @DPDK
+ * Ecore HW lock
+ * =============
+ * Although the implementation is ready, today we don't have any flow that
+ * utilizes said locks - and we want to keep it this way.
+ * If this changes, this needs to be revisited.
+ */
+
+/* Ecore DMAE
+ * =============
+ */
+/* Build the DMAE command opcode/opcode_b fields from the requested
+ * source/destination types and p_params, and store them (little-endian)
+ * into the hwfn's staged DMAE command.
+ */
+static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
+			      const u8 is_src_type_grc,
+			      const u8 is_dst_type_grc,
+			      struct ecore_dmae_params *p_params)
+{
+	u16 opcode_b = 0;
+	u32 opcode = 0;
+
+	/* Whether the source is the PCIe or the GRC.
+	 * 0- The source is the PCIe
+	 * 1- The source is the GRC.
+	 */
+	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+		   : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
+	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+	    DMAE_CMD_SRC_PF_ID_SHIFT;
+
+	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+		   : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
+	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+	    DMAE_CMD_DST_PF_ID_SHIFT;
+
+	/* DMAE_E4_TODO need to check which value to specify here. */
+	/* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
+
+	/* Whether to write a completion word to the completion destination:
+	 * 0-Do not write a completion word
+	 * 1-Write the completion word
+	 */
+	opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
+
+	if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
+		opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
+
+	/* swapping mode 3 - big endian there should be a define ifdefed in
+	 * the HSI somewhere. Since it is currently
+	 */
+	opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
+
+	opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
+
+	/* reset source address in next go.
+	 * Fix: this bit was previously OR'd in twice; a single set suffices.
+	 */
+	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+
+	/* reset dest address in next go */
+	opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
+
+	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
+	if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
+		opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
+		opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
+	} else {
+		opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
+			     DMAE_CMD_SRC_VF_ID_SHIFT);
+	}
+	if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
+		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+	} else {
+		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+	}
+
+	p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
+	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
+}
+
+/* Address of the GO register of DMAE channel idx; the GO registers are
+ * laid out contiguously, one dword per channel.
+ */
+static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
+{
+	OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
+
+	return DMAE_REG_GO_C0 + (u32)idx * sizeof(u32);
+}
+
+/* Kick off the DMAE command staged in p_hwfn->dmae_info.p_dmae_cmd: copy it
+ * into the DMAE command memory of this hwfn's channel and write the GO
+ * register. Returns ECORE_INVAL if neither a source nor a destination
+ * address is set; does not wait for completion (see
+ * ecore_dmae_operation_wait()).
+ */
+static enum _ecore_status_t
+ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
+	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+
+	/* verify address is not OSAL_NULL */
+	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
+		DP_NOTICE(p_hwfn, true,
+			  "source or destination address 0 idx_cmd=%d\n"
+			  "opcode = [0x%08x,0x%04x] len=0x%x"
+			  " src=0x%x:%x dst=0x%x:%x\n",
+			  idx_cmd, (u32)p_command->opcode,
+			  (u16)p_command->opcode_b,
+			  (int)p_command->length,
+			  (int)p_command->src_addr_hi,
+			  (int)p_command->src_addr_lo,
+			  (int)p_command->dst_addr_hi,
+			  (int)p_command->dst_addr_lo);
+
+		return ECORE_INVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
+		   "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+		   idx_cmd, (u32)p_command->opcode,
+		   (u16)p_command->opcode_b,
+		   (int)p_command->length,
+		   (int)p_command->src_addr_hi,
+		   (int)p_command->src_addr_lo,
+		   (int)p_command->dst_addr_hi, (int)p_command->dst_addr_lo);
+
+	/* Copy the command to DMAE - need to do it before every call
+	 * for source/dest address no reset.
+	 * The number of commands have been increased to 16 (previous was 14)
+	 * The first 9 DWs are the command registers, the 10 DW is the
+	 * GO register, and
+	 * the rest are result registers (which are read only by the client).
+	 */
+	for (i = 0; i < DMAE_CMD_SIZE; i++) {
+		/* Zero-fill everything past the command registers proper */
+		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+		    *(((u32 *)p_command) + i) : 0;
+
+		ecore_wr(p_hwfn, p_ptt,
+			 DMAE_REG_CMD_MEM +
+			 (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+			 (i * sizeof(u32)), data);
+	}
+
+	/* Writing the GO register triggers HW processing of the command */
+	ecore_wr(p_hwfn, p_ptt,
+		 ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
+
+	return ecore_status;
+}
+
+/* Allocate the DMA-coherent resources of p_hwfn->dmae_info: the completion
+ * word, the staged DMAE command, and the intermediate bounce buffer.
+ * On any failure everything already allocated is released and ECORE_NOMEM
+ * is returned.
+ */
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
+{
+	struct dmae_cmd **pp_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+	u32 **pp_buf = &p_hwfn->dmae_info.p_intermediate_buffer;
+	u32 **pp_comp = &p_hwfn->dmae_info.p_completion_word;
+
+	*pp_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+					   &p_hwfn->dmae_info.completion_word_phys_addr,
+					   sizeof(u32));
+	if (*pp_comp == OSAL_NULL) {
+		DP_NOTICE(p_hwfn, true,
+			  "Failed to allocate `p_completion_word'\n");
+		ecore_dmae_info_free(p_hwfn);
+		return ECORE_NOMEM;
+	}
+
+	*pp_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+					  &p_hwfn->dmae_info.dmae_cmd_phys_addr,
+					  sizeof(struct dmae_cmd));
+	if (*pp_cmd == OSAL_NULL) {
+		DP_NOTICE(p_hwfn, true,
+			  "Failed to allocate `struct dmae_cmd'\n");
+		ecore_dmae_info_free(p_hwfn);
+		return ECORE_NOMEM;
+	}
+
+	*pp_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+					  &p_hwfn->dmae_info.intermediate_buffer_phys_addr,
+					  sizeof(u32) * DMAE_MAX_RW_SIZE);
+	if (*pp_buf == OSAL_NULL) {
+		DP_NOTICE(p_hwfn, true,
+			  "Failed to allocate `intermediate_buffer'\n");
+		ecore_dmae_info_free(p_hwfn);
+		return ECORE_NOMEM;
+	}
+
+	/* DMAE_E4_TODO : Need to change this to reflect proper channel */
+	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+	return ECORE_SUCCESS;
+}
+
+/* Release the DMA-coherent resources of p_hwfn->dmae_info, nulling each
+ * pointer so the function is safe to call on a partially-allocated state.
+ */
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
+{
+	/* Just make sure no one is in the middle */
+	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+	if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
+		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+				       p_hwfn->dmae_info.p_completion_word,
+				       p_hwfn->dmae_info.completion_word_phys_addr,
+				       sizeof(u32));
+		p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
+	}
+
+	if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
+		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+				       p_hwfn->dmae_info.p_dmae_cmd,
+				       p_hwfn->dmae_info.dmae_cmd_phys_addr,
+				       sizeof(struct dmae_cmd));
+		p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
+	}
+
+	if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
+		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+				       p_hwfn->dmae_info.p_intermediate_buffer,
+				       p_hwfn->dmae_info.intermediate_buffer_phys_addr,
+				       sizeof(u32) * DMAE_MAX_RW_SIZE);
+		p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
+	}
+
+	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+}
+
+/* Poll the DMAE completion word until HW writes DMAE_COMPLETION_VAL or the
+ * (platform-scaled) wait limit expires. On success the completion word is
+ * cleared for the next operation.
+ */
+static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
+{
+	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+	u32 wait_cnt_limit = 10000, wait_cnt = 0;
+
+#ifndef ASIC_ONLY
+	/* Emulation/FPGA platforms are slower - scale the timeout up */
+	u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
+		      ECORE_EMUL_FACTOR :
+		      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
+		       ECORE_FPGA_FACTOR : 1));
+
+	wait_cnt_limit *= factor;
+#endif
+
+	/* DMAE_E4_TODO : TODO check if we have to call any other function
+	 * other than BARRIER to sync the completion_word since we are not
+	 * using the volatile keyword for this
+	 */
+	OSAL_BARRIER(p_hwfn->p_dev);
+	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+		/* DMAE_E4_TODO : using OSAL_MSLEEP instead of mm_wait since mm
+		 * functions are getting deprecated. Need to review for future.
+		 */
+		OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
+		if (++wait_cnt > wait_cnt_limit) {
+			/* Fix: every other DP_NOTICE call-site passes a
+			 * boolean as the second argument; ECORE_MSG_HW is a
+			 * verbosity flag and was being passed by mistake.
+			 */
+			DP_NOTICE(p_hwfn, false,
+				  "Timed-out waiting for operation to"
+				  " complete. Completion word is 0x%08x"
+				  " expected 0x%08x.\n",
+				  *p_hwfn->dmae_info.p_completion_word,
+				  DMAE_COMPLETION_VAL);
+			ecore_status = ECORE_TIMEOUT;
+			break;
+		}
+		/* to sync the completion_word since we are not
+		 * using the volatile keyword for p_completion_word
+		 */
+		OSAL_BARRIER(p_hwfn->p_dev);
+	}
+
+	if (ecore_status == ECORE_SUCCESS)
+		*p_hwfn->dmae_info.p_completion_word = 0;
+
+	return ecore_status;
+}
+
+/* Program the staged DMAE command's source/destination addresses and length,
+ * post it and wait for completion. Host-virtual addresses are bounced
+ * through the pre-allocated DMA-coherent intermediate buffer.
+ */
+static enum _ecore_status_t
+ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
+				 struct ecore_ptt *p_ptt,
+				 u64 src_addr,
+				 u64 dst_addr,
+				 u8 src_type, u8 dst_type, u32 length)
+{
+	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+
+	switch (src_type) {
+	case ECORE_DMAE_ADDRESS_GRC:
+	case ECORE_DMAE_ADDRESS_HOST_PHYS:
+		cmd->src_addr_hi = DMA_HI(src_addr);
+		cmd->src_addr_lo = DMA_LO(src_addr);
+		break;
+		/* for virt source addresses we use the intermediate buffer. */
+	case ECORE_DMAE_ADDRESS_HOST_VIRT:
+		cmd->src_addr_hi = DMA_HI(phys);
+		cmd->src_addr_lo = DMA_LO(phys);
+		OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+			    (void *)(osal_uintptr_t)src_addr,
+			    length * sizeof(u32));
+		break;
+	default:
+		return ECORE_INVAL;
+	}
+
+	switch (dst_type) {
+	case ECORE_DMAE_ADDRESS_GRC:
+	case ECORE_DMAE_ADDRESS_HOST_PHYS:
+		cmd->dst_addr_hi = DMA_HI(dst_addr);
+		cmd->dst_addr_lo = DMA_LO(dst_addr);
+		break;
+		/* for virt destination address we use the intermediate buff. */
+	case ECORE_DMAE_ADDRESS_HOST_VIRT:
+		cmd->dst_addr_hi = DMA_HI(phys);
+		cmd->dst_addr_lo = DMA_LO(phys);
+		break;
+	default:
+		return ECORE_INVAL;
+	}
+
+	cmd->length = (u16)length;
+
+	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+		OSAL_DMA_SYNC(p_hwfn->p_dev,
+			      (void *)HILO_U64(cmd->src_addr_hi,
+					       cmd->src_addr_lo),
+			      length * sizeof(u32), false);
+
+	ecore_dmae_post_command(p_hwfn, p_ptt);
+
+	ecore_status = ecore_dmae_operation_wait(p_hwfn);
+
+	/* TODO - is it true ? */
+	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+		OSAL_DMA_SYNC(p_hwfn->p_dev,
+			      (void *)HILO_U64(cmd->src_addr_hi,
+					       cmd->src_addr_lo),
+			      length * sizeof(u32), true);
+
+	if (ecore_status != ECORE_SUCCESS) {
+		/* Fix: this helper serves every address-type combination, so
+		 * don't mislabel the failure as host2grc with a grc address;
+		 * also pass a boolean - not ECORE_MSG_HW - as the assert arg.
+		 */
+		DP_NOTICE(p_hwfn, false,
+			  "DMAE sub-operation: Wait Failed. src_addr"
+			  " 0x%lx, dst_addr 0x%lx, size_in_dwords 0x%x\n",
+			  (unsigned long)src_addr, (unsigned long)dst_addr,
+			  length);
+		return ecore_status;
+	}
+
+	/* Virtual destination - copy the DMA'd data out of the bounce buf */
+	if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
+		OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
+			    &p_hwfn->dmae_info.p_intermediate_buffer[0],
+			    length * sizeof(u32));
+
+	return ECORE_SUCCESS;
+}
+
+/* Execute a DMAE transfer of size_in_dwords between src_addr and dst_addr
+ * (each either a GRC dword-address, a host-physical or a host-virtual
+ * address, per src_type/dst_type), splitting it into chunks of at most
+ * DMAE_MAX_RW_SIZE dwords. Caller must hold dmae_info.mutex.
+ */
+static enum _ecore_status_t
+ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
+			   struct ecore_ptt *p_ptt,
+			   u64 src_addr,
+			   u64 dst_addr,
+			   u8 src_type,
+			   u8 dst_type,
+			   u32 size_in_dwords,
+			   struct ecore_dmae_params *p_params)
+{
+	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+	u64 src_addr_split = 0, dst_addr_split = 0;
+	u16 length_limit = DMAE_MAX_RW_SIZE;
+	u32 offset = 0;
+
+	ecore_dmae_opcode(p_hwfn,
+			  (src_type == ECORE_DMAE_ADDRESS_GRC),
+			  (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
+
+	/* HW writes DMAE_COMPLETION_VAL to this address when done */
+	cmd->comp_addr_lo = DMA_LO(phys);
+	cmd->comp_addr_hi = DMA_HI(phys);
+	cmd->comp_val = DMAE_COMPLETION_VAL;
+
+	/* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
+	cnt_split = size_in_dwords / length_limit;
+	length_mod = size_in_dwords % length_limit;
+
+	src_addr_split = src_addr;
+	dst_addr_split = dst_addr;
+
+	for (i = 0; i <= cnt_split; i++) {
+		offset = length_limit * i;
+
+		/* GRC addresses advance in dwords; host addresses in bytes */
+		if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
+			if (src_type == ECORE_DMAE_ADDRESS_GRC)
+				src_addr_split = src_addr + offset;
+			else
+				src_addr_split = src_addr + (offset * 4);
+		}
+
+		if (dst_type == ECORE_DMAE_ADDRESS_GRC)
+			dst_addr_split = dst_addr + offset;
+		else
+			dst_addr_split = dst_addr + (offset * 4);
+
+		/* Last iteration carries the remainder chunk */
+		length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+		/* might be zero on last iteration */
+		if (!length_cur)
+			continue;
+
+		ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
+								p_ptt,
+								src_addr_split,
+								dst_addr_split,
+								src_type,
+								dst_type,
+								length_cur);
+		if (ecore_status != ECORE_SUCCESS) {
+			DP_NOTICE(p_hwfn, false,
+				  "ecore_dmae_execute_sub_operation Failed"
+				  " with error 0x%x. source_addr 0x%lx,"
+				  " dest addr 0x%lx, size_in_dwords 0x%x\n",
+				  ecore_status, (unsigned long)src_addr,
+				  (unsigned long)dst_addr, length_cur);
+
+			ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
+			break;
+		}
+	}
+
+	return ecore_status;
+}
+
+/* DMAE a host-virtual buffer of size_in_dwords into GRC address grc_addr. */
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+		    struct ecore_ptt *p_ptt,
+		    u64 source_addr,
+		    u32 grc_addr, u32 size_in_dwords, u32 flags)
+{
+	struct ecore_dmae_params dmae_params;
+	enum _ecore_status_t rc;
+
+	OSAL_MEMSET(&dmae_params, 0, sizeof(dmae_params));
+	dmae_params.flags = flags;
+
+	/* The DMAE channel and staged command are shared - serialize users */
+	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+	/* GRC addresses are expressed in dwords towards the DMAE engine */
+	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+					grc_addr / sizeof(u32),
+					ECORE_DMAE_ADDRESS_HOST_VIRT,
+					ECORE_DMAE_ADDRESS_GRC,
+					size_in_dwords, &dmae_params);
+
+	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+	return rc;
+}
+
+/* DMAE size_in_dwords from GRC address grc_addr into a host buffer. */
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+		    struct ecore_ptt *p_ptt,
+		    u32 grc_addr,
+		    dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+{
+	struct ecore_dmae_params dmae_params;
+	enum _ecore_status_t rc;
+
+	OSAL_MEMSET(&dmae_params, 0, sizeof(dmae_params));
+	dmae_params.flags = flags;
+
+	/* The DMAE channel and staged command are shared - serialize users */
+	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+	/* GRC addresses are expressed in dwords towards the DMAE engine */
+	rc = ecore_dmae_execute_command(p_hwfn, p_ptt,
+					grc_addr / sizeof(u32),
+					dest_addr, ECORE_DMAE_ADDRESS_GRC,
+					ECORE_DMAE_ADDRESS_HOST_VIRT,
+					size_in_dwords, &dmae_params);
+
+	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+	return rc;
+}
+
+/* DMAE size_in_dwords between two host-physical buffers. */
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+		     struct ecore_ptt *p_ptt,
+		     dma_addr_t source_addr,
+		     dma_addr_t dest_addr,
+		     u32 size_in_dwords, struct ecore_dmae_params *p_params)
+{
+	enum _ecore_status_t rc;
+
+	/* The DMAE channel and staged command are shared - serialize users */
+	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+	rc = ecore_dmae_execute_command(p_hwfn, p_ptt,
+					source_addr, dest_addr,
+					ECORE_DMAE_ADDRESS_HOST_PHYS,
+					ECORE_DMAE_ADDRESS_HOST_PHYS,
+					size_in_dwords, p_params);
+
+	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+	return rc;
+}
+
+/* Resolve the absolute QM physical-queue id for the given protocol and
+ * per-protocol parameters; unknown protocols map to relative PQ 0.
+ */
+u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
+		    enum protocol_type proto,
+		    union ecore_qm_pq_params *p_params)
+{
+	u16 pq = 0;
+
+	if (!p_params && (proto == PROTOCOLID_CORE ||
+			  proto == PROTOCOLID_ETH)) {
+		DP_NOTICE(p_hwfn, true,
+			  "Protocol %d received NULL PQ params\n", proto);
+		return 0;
+	}
+
+	if (proto == PROTOCOLID_CORE) {
+		if (p_params->core.tc == LB_TC)
+			pq = p_hwfn->qm_info.pure_lb_pq;
+		else if (p_params->core.tc == OOO_LB_TC)
+			pq = p_hwfn->qm_info.ooo_pq;
+		else
+			pq = p_hwfn->qm_info.offload_pq;
+	} else if (proto == PROTOCOLID_ETH) {
+		pq = p_params->eth.tc;
+		/* TODO - multi-CoS for VFs? */
+		if (p_params->eth.is_vf)
+			pq += p_hwfn->qm_info.vf_queues_offset +
+			      p_params->eth.vf_id;
+	}
+
+	/* Translate the relative PQ into this PF's absolute PQ range */
+	pq = CM_TX_PQ_BASE + pq + RESC_START(p_hwfn, ECORE_PQ);
+
+	return pq;
+}
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+                        enum ecore_hw_err_type err_type)
+{
+       /* Fan failure cannot be masked by handling of another HW error */
+       if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
+                          "Recovery is in progress."
+                          "Avoid notifying about HW error %d.\n",
+                          err_type);
+               return;
+       }
+
+       OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
+}
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
new file mode 100644 (file)
index 0000000..8949944
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HW_H__
+#define __ECORE_HW_H__
+
+#include "ecore.h"
+#include "ecore_dev_api.h"
+
+/* Forward declaration */
+struct ecore_ptt;
+
+/* Indices of PTT entries set aside for dedicated clients; these are
+ * obtained via ecore_get_reserved_ptt() rather than the regular PTT pool.
+ */
+enum reserved_ptts {
+	RESERVED_PTT_EDIAG,
+	RESERVED_PTT_USER_SPACE,
+	RESERVED_PTT_MAIN,
+	RESERVED_PTT_DPC,
+	RESERVED_PTT_MAX	/* count of reserved entries */
+};
+
+/* @@@TMP - in earlier versions of the emulation, the HW lock started from 1
+ * instead of 0, this should be fixed in later HW versions.
+ */
+#ifndef MISC_REG_DRIVER_CONTROL_0
+#define MISC_REG_DRIVER_CONTROL_0      MISC_REG_DRIVER_CONTROL_1
+#endif
+#ifndef MISC_REG_DRIVER_CONTROL_0_SIZE
+#define MISC_REG_DRIVER_CONTROL_0_SIZE MISC_REG_DRIVER_CONTROL_1_SIZE
+#endif
+
+/* Values for the DMAE command's destination-type field
+ * (see ecore_dmae_opcode(): "0-None 1-PCIe 2-GRC 3-None")
+ */
+enum _dmae_cmd_dst_mask {
+	DMAE_CMD_DST_MASK_NONE = 0,
+	DMAE_CMD_DST_MASK_PCIE = 1,
+	DMAE_CMD_DST_MASK_GRC = 2
+};
+
+/* Values for the DMAE command's source-type field
+ * (see ecore_dmae_opcode(): "0- PCIe, 1- GRC")
+ */
+enum _dmae_cmd_src_mask {
+	DMAE_CMD_SRC_MASK_PCIE = 0,
+	DMAE_CMD_SRC_MASK_GRC = 1
+};
+
+/* Values for the DMAE command's completion-CRC enable field */
+enum _dmae_cmd_crc_mask {
+	DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+	DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE  0x1
+
+#ifdef __BIG_ENDIAN
+#define DMAE_COMPLETION_VAL    0xAED10000
+#define DMAE_CMD_ENDIANITY     0x3
+#else
+#define DMAE_COMPLETION_VAL    0xD1AE
+#define DMAE_CMD_ENDIANITY     0x2
+#endif
+
+#define DMAE_CMD_SIZE  14
+/* size of DMAE command structure to fill.. DMAE_CMD_SIZE-5 */
+#define DMAE_CMD_SIZE_TO_FILL  (DMAE_CMD_SIZE - 5)
+/* Minimum wait for dmae operation to complete 2 milliseconds */
+#define DMAE_MIN_WAIT_TIME     0x2
+#define DMAE_MAX_CLIENTS       32
+
+/**
+* @brief ecore_gtt_init - Initialize GTT windows
+*
+* @param p_hwfn
+*/
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t - success (0), negative - error.
+ */
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_free -
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_get_bar_addr - Get PTT's external BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param new_hw_addr
+ * @param p_ptt
+ */
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt, u32 new_hw_addr);
+
+/**
+ * @brief ecore_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct ecore_ptt *
+ */
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+                                        enum reserved_ptts ptt_idx);
+
+/**
+ * @brief ecore_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param val
+ * @param hw_addr
+ */
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+             struct ecore_ptt *p_ptt, u32 hw_addr, u32 val);
+
+/**
+ * @brief ecore_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ *
+ * @return u32 - value read from hw_addr
+ */
+u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr);
+
+/**
+ * @brief ecore_memcpy_from - copy n bytes from BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt,
+                      void *dest, u32 hw_addr, osal_size_t n);
+
+/**
+ * @brief ecore_memcpy_to - copy n bytes to BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt,
+                    u32 hw_addr, void *src, osal_size_t n);
+/**
+ * @brief ecore_fid_pretend - pretend to another function when
+ *        accessing the ptt window. There is no way to unpretend
+ *        a function. The only way to cancel a pretend is to
+ *        pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of pxp_pretend structure. Can contain
+ *            either pf / vf, port/path fields are don't care.
+ */
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt, u16 fid);
+
+/**
+ * @brief ecore_port_pretend - pretend to another port when
+ *        accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, u8 port_id);
+
+/**
+ * @brief ecore_port_unpretend - cancel any previously set port
+ *        pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_vfid_to_concrete - build a concrete FID for a
+ *        given VF ID
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid);
+
+/**
+* @brief ecore_dmae_info_alloc - Init the dmae_info structure
+* which is part of p_hwfn.
+* @param p_hwfn
+*/
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+* @brief ecore_dmae_info_free - Free the dmae_info structure
+* which is part of p_hwfn
+*
+* @param p_hwfn
+*/
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
+
+/* Per-protocol parameters for ecore_get_qm_pq() PQ-id resolution */
+union ecore_qm_pq_params {
+	struct {
+		u8 tc;		/* traffic class; LB_TC / OOO_LB_TC select
+				 * the pure-LB / OOO PQs, anything else the
+				 * offload PQ (see ecore_get_qm_pq())
+				 */
+	} core;
+
+	struct {
+		u8 is_vf;	/* non-zero - queue belongs to a VF */
+		u8 vf_id;	/* VF index, consulted when is_vf is set */
+		u8 tc;		/* traffic class */
+	} eth;
+};
+
+u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
+                   enum protocol_type proto, union ecore_qm_pq_params *params);
+
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+                                       const u8 *fw_data);
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+                        enum ecore_hw_err_type err_type);
+
+#endif /* __ECORE_HW_H__ */
diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h
new file mode 100644 (file)
index 0000000..fa518ce
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _ECORE_IGU_DEF_H_
+#define _ECORE_IGU_DEF_H_
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN       (0x1 << 0)   /* function enable        */
+#define IGU_PF_CONF_MSI_MSIX_EN   (0x1 << 1)   /* MSI/MSIX enable        */
+#define IGU_PF_CONF_INT_LINE_EN   (0x1 << 2)   /* INT enable             */
+#define IGU_PF_CONF_ATTN_BIT_EN   (0x1 << 3)   /* attention enable       */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)   /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE     (0x1 << 5)   /* simd all ones mode     */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN        (0x1 << 0)  /* function enable        */
+#define IGU_VF_CONF_MSI_MSIX_EN    (0x1 << 1)  /* MSI/MSIX enable        */
+#define IGU_VF_CONF_SINGLE_ISR_EN  (0x1 << 4)  /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK    (0xF)       /* Parent PF              */
+#define IGU_VF_CONF_PARENT_SHIFT   5   /* Parent PF              */
+
+/* IGU control commands - access type written into the TYPE field of
+ * the IGU command control register.
+ */
+enum igu_ctrl_cmd {
+       IGU_CTRL_CMD_TYPE_RD,   /* read access */
+       IGU_CTRL_CMD_TYPE_WR,   /* write access */
+       MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register -
+ * bit-field layout of the 32-bit command control value.
+ */
+struct igu_ctrl_reg {
+       u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK          0xFFFF  /* Opaque_FID     */
+#define IGU_CTRL_REG_FID_SHIFT         0
+#define IGU_CTRL_REG_PXP_ADDR_MASK     0xFFF   /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT    16
+#define IGU_CTRL_REG_RESERVED_MASK     0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT    28
+#define IGU_CTRL_REG_TYPE_MASK         0x1     /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT                31
+};
+
+#endif
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
new file mode 100644 (file)
index 0000000..5324e05
--- /dev/null
@@ -0,0 +1,1275 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hw.h"
+#include "ecore_init_ops.h"
+#include "reg_addr.h"
+#include "ecore_rt_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_tools.h"
+#include "ecore_init_fw_funcs.h"
+
+/* @DPDK CmInterfaceEnum */
+/* CM (connection manager) interface identifiers; each CM client has a
+ * secondary (SEC) and a primary (PRI) interface.
+ */
+enum cm_interface_enum {
+       MCM_SEC,
+       MCM_PRI,
+       UCM_SEC,
+       UCM_PRI,
+       TCM_SEC,
+       TCM_PRI,
+       YCM_SEC,
+       YCM_PRI,
+       XCM_SEC,
+       XCM_PRI,
+       NUM_OF_CM_INTERFACES
+};
+/* general constants */
+#define QM_PQ_MEM_4KB(pq_size) \
+(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) \
+(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID                       0xffff
+/* feature enable */
+#define QM_BYPASS_EN                           1
+#define QM_BYTE_CRD_EN                         1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF                    4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND                     62500000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT         0
+#define QM_WFQ_VP_PQ_PF_SHIFT          5
+#define QM_WFQ_INC_VAL(weight)         ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL                     43750000
+/* RL constants */
+#define QM_RL_UPPER_BOUND                      62500000
+#define QM_RL_PERIOD                           5
+#define QM_RL_PERIOD_CLK_25M           (25 * QM_RL_PERIOD)
+#define QM_RL_INC_VAL(rate) \
+OSAL_MAX_T(u32, (((rate ? rate : 1000000) * QM_RL_PERIOD * 1.01) / 8), 1)
+#define QM_RL_MAX_INC_VAL                      43750000
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF          1
+#define QM_OPPOR_FW_STOP_DEF           0
+#define QM_OPPOR_PQ_EMPTY_DEF          1
+#define EAGLE_WORKAROUND_TC                    7
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES                 150
+#define PBF_CMDQ_EAGLE_WORKAROUND_LINES                8 /* eagle workaround CmdQ */
+#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
+(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
+- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
+(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS 38        /* 256B blocks in 9700B packet */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS       /* headroom per-port */
+#define BTB_EAGLE_WORKAROUND_BLOCKS    4       /* eagle workaround blocks */
+#define BTB_PURE_LB_FACTOR             10
+#define BTB_PURE_LB_RATIO              7 /* factored (hence really 0.7) */
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH                  32
+#define QM_STOP_CMD_ADDR                               0x2
+#define QM_STOP_CMD_STRUCT_SIZE                        2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET  0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT   0
+#define QM_STOP_CMD_PAUSE_MASK_MASK            -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET            1
+#define QM_STOP_CMD_GROUP_ID_SHIFT             16
+#define QM_STOP_CMD_GROUP_ID_MASK              15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET             1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT              24
+#define QM_STOP_CMD_PQ_TYPE_MASK               1
+#define QM_STOP_CMD_MAX_POLL_COUNT             100
+#define QM_STOP_CMD_POLL_PERIOD_US             500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd)        cmd##_STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
+((port) * (max_phys_tcs_per_port) + (tc))
+#define LB_VOQ(port)                           (MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phys_tcs_per_port) \
+((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+       if (pf_rl_en) {
+               /* enable RLs for all VOQs */
+               STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+                            (1 << MAX_NUM_VOQS) - 1);
+               /* write RL period */
+               STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               /* set credit threshold for QM bypass flow */
+               if (QM_BYPASS_EN)
+                       STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+                                    QM_RL_UPPER_BOUND);
+       }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
+{
+       u32 enable = pf_wfq_en ? 1 : 0;
+
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, enable);
+
+       if (!pf_wfq_en)
+               return;
+
+       /* set credit threshold for the QM bypass flow */
+       if (QM_BYPASS_EN)
+               STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+                            QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+                    vport_rl_en ? 1 : 0);
+       if (vport_rl_en) {
+               /* write RL period (use timer 0 only) */
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               /* set credit threshold for QM bypass flow */
+               if (QM_BYPASS_EN)
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+                                    QM_RL_UPPER_BOUND);
+       }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
+{
+       u32 enable = vport_wfq_en ? 1 : 0;
+
+       STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, enable);
+
+       /* when enabled, also set the credit threshold used by the QM
+        * bypass flow
+        */
+       if (QM_BYPASS_EN && vport_wfq_en)
+               STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+                            QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
+                                        u8 voq, u16 cmdq_lines)
+{
+       u32 qm_line_crd;
+       bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
+       /* BB A0 silicon: cap the number of command queue lines */
+       if (is_bb_a0)
+               cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
+       qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+       /* program the PBF line allocation and the matching QM line credit */
+       OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+                        (u32)cmdq_lines);
+       STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+       STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+                    qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines.
+ * Each active port's lines are split between its active physical TCs
+ * after reserving a fixed amount for the pure LB VOQ (and, when the
+ * eagle workaround is enabled, for the workaround VOQ).
+ */
+static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
+                                    u8 max_ports_per_engine,
+                                    u8 max_phys_tcs_per_port,
+                                    struct init_qm_port_params
+                                    port_params[MAX_NUM_PORTS])
+{
+       u8 tc, voq, port_id;
+       bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
+       /* clear PBF lines for all VOQs */
+       for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+               STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               if (port_params[port_id].active) {
+                       u16 phys_lines, phys_lines_per_tc;
+                       /* reserve lines for the pure LB VOQ up front */
+                       phys_lines =
+                           port_params[port_id].num_pbf_cmd_lines -
+                           PBF_CMDQ_PURE_LB_LINES;
+                       if (eagle_workaround)
+                               phys_lines -= PBF_CMDQ_EAGLE_WORKAROUND_LINES;
+                       /* find #lines per active physical TC */
+                       phys_lines_per_tc =
+                           phys_lines /
+                           port_params[port_id].num_active_phys_tcs;
+                       /* init registers per active TC */
+                       for (tc = 0;
+                            tc < port_params[port_id].num_active_phys_tcs;
+                            tc++) {
+                               voq =
+                                   PHYS_VOQ(port_id, tc,
+                                            max_phys_tcs_per_port);
+                               ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+                                                            phys_lines_per_tc);
+                       }
+                       /* init registers for pure LB TC */
+                       ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+                                                    PBF_CMDQ_PURE_LB_LINES);
+                       /* init registers for eagle workaround */
+                       if (eagle_workaround) {
+                               voq =
+                                   PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
+                                            max_phys_tcs_per_port);
+                               ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+                                            PBF_CMDQ_EAGLE_WORKAROUND_LINES);
+                       }
+               }
+       }
+}
+
+/*
+ * Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ *     B - BTB blocks for this port
+ *     C - Number of physical TCs for this port
+ * 2. Calculation:
+ *     a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ *        headroom.
+ *     b. B = B - 38 (remainder after global headroom allocation).
+ *     c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
+ *     d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
+ *     e. B / C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space
+ *   is not enabled and allocated for each TC.
+ */
+static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
+                                    u8 max_ports_per_engine,
+                                    u8 max_phys_tcs_per_port,
+                                    struct init_qm_port_params
+                                    port_params[MAX_NUM_PORTS])
+{
+       u8 tc, voq, port_id;
+       u32 usable_blocks, pure_lb_blocks, phys_blocks;
+       bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               if (port_params[port_id].active) {
+                       /* subtract headroom blocks */
+                       usable_blocks =
+                           port_params[port_id].num_btb_blocks -
+                           BTB_HEADROOM_BLOCKS;
+                       if (eagle_workaround)
+                               usable_blocks -= BTB_EAGLE_WORKAROUND_BLOCKS;
+                       /* pure LB share: B / (C + 0.7), computed with
+                        * integer math scaled by BTB_PURE_LB_FACTOR
+                        */
+                       pure_lb_blocks =
+                           (usable_blocks * BTB_PURE_LB_FACTOR) /
+                           (port_params[port_id].num_active_phys_tcs *
+                            BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
+                       /* pure LB gets at least one jumbo packet worth */
+                       pure_lb_blocks =
+                           OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
+                                      pure_lb_blocks / BTB_PURE_LB_FACTOR);
+                       /* remaining blocks split evenly between physical TCs */
+                       phys_blocks =
+                           (usable_blocks -
+                            pure_lb_blocks) /
+                           port_params[port_id].num_active_phys_tcs;
+                       /* init physical TCs */
+                       for (tc = 0;
+                            tc < port_params[port_id].num_active_phys_tcs;
+                            tc++) {
+                               voq =
+                                   PHYS_VOQ(port_id, tc,
+                                            max_phys_tcs_per_port);
+                               STORE_RT_REG(p_hwfn,
+                                            PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+                                            phys_blocks);
+                       }
+                       /* init pure LB TC */
+                       STORE_RT_REG(p_hwfn,
+                                    PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ
+                                                                 (port_id)),
+                                    pure_lb_blocks);
+                       /* init eagle workaround */
+                       if (eagle_workaround) {
+                               voq =
+                                   PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
+                                            max_phys_tcs_per_port);
+                               STORE_RT_REG(p_hwfn,
+                                            PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+                                            BTB_EAGLE_WORKAROUND_BLOCKS);
+                       }
+               }
+       }
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF.
+ * Maps each of the PF's Tx PQs (PF PQs first, then VF PQs) to its
+ * VOQ/VPORT, writes the PQ map CAM entries and base addresses, and
+ * records which PQs belong to VFs in the size-select registers.
+ */
+static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   u8 port_id,
+                                   u8 pf_id,
+                                   u8 max_phys_tcs_per_port,
+                                   bool is_first_pf,
+                                   u32 num_pf_cids,
+                                   u32 num_vf_cids,
+                                   u16 start_pq,
+                                   u16 num_pf_pqs,
+                                   u16 num_vf_pqs,
+                                   u8 start_vport,
+                                   u32 base_mem_addr_4kb,
+                                   struct init_qm_pq_params *pq_params,
+                                   struct init_qm_vport_params *vport_params)
+{
+       u16 i, pq_id, pq_group;
+       u16 num_pqs = num_pf_pqs + num_vf_pqs;
+       u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
+       u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
+       bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
+       /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+       u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+       u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+       u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
+       u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
+       u32 mem_addr_4kb = base_mem_addr_4kb;
+       /* set mapping from PQ group to PF */
+       for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+               STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+                            (u32)(pf_id));
+       /* set PQ sizes */
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+                    QM_PQ_SIZE_256B(num_pf_cids));
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+                    QM_PQ_SIZE_256B(num_vf_cids));
+       /* go over all Tx PQs */
+       for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
+               struct qm_rf_pq_map tx_pq_map;
+               u8 voq =
+                   VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+               /* VF PQs are laid out after the PF PQs in pq_params */
+               bool is_vf_pq = (i >= num_pf_pqs);
+               /* update first Tx PQ of VPORT/TC */
+               u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
+               u16 first_tx_pq_id =
+                   vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
+                                                               tc_id];
+               if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+                       /* create new VP PQ */
+                       vport_params[vport_id_in_pf].
+                           first_tx_pq_id[pq_params[i].tc_id] = pq_id;
+                       first_tx_pq_id = pq_id;
+                       /* map VP PQ to VOQ and PF */
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
+                                    (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
+                                                       QM_WFQ_VP_PQ_PF_SHIFT));
+               }
+               /* fill PQ map entry */
+               OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+                         is_vf_pq ? 1 : 0);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+                         is_vf_pq ? pq_params[i].vport_id : 0);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+                         pq_params[i].wrr_group);
+               /* write PQ map entry to CAM */
+               /* NOTE(review): type-puns the map struct to u32 via a pointer
+                * cast; assumes struct layout matches a u32 - confirm vs HSI
+                */
+               STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+                            *((u32 *)&tx_pq_map));
+               /* set base address */
+               STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+                            mem_addr_4kb);
+               /* check if VF PQ */
+               if (is_vf_pq) {
+                       tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+                           (1 << (pq_id % tx_pq_vf_mask_width));
+                       mem_addr_4kb += vport_pq_mem_4kb;
+               } else {
+                       mem_addr_4kb += pq_mem_4kb;
+               }
+       }
+       /* store Tx PQ VF mask to size select register */
+       for (i = 0; i < num_tx_pq_vf_masks; i++) {
+               if (tx_pq_vf_mask[i]) {
+                       /* on BB A0, merge with the mask already programmed by
+                        * previous PFs (read back unless this is the first PF)
+                        */
+                       if (is_bb_a0) {
+                               u32 curr_mask =
+                                   is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
+                                                      QM_REG_MAXPQSIZETXSEL_0
+                                                              + i * 4);
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+                                            i, curr_mask | tx_pq_vf_mask[i]);
+                       } else
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+                                            i, tx_pq_vf_mask[i]);
+               }
+       }
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF.
+ * Maps the PF's QM_OTHER_PQS_PER_PF non-Tx PQs and sets their base
+ * addresses, starting at base_mem_addr_4kb.
+ */
+static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+                                      u8 port_id,
+                                      u8 pf_id,
+                                      u32 num_pf_cids,
+                                      u32 num_tids, u32 base_mem_addr_4kb)
+{
+       u16 i, pq_id;
+       /* each PF has its own "other" PQ group */
+       u16 pq_group = pf_id;
+       u32 pq_size = num_pf_cids + num_tids;
+       u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+       u32 mem_addr_4kb = base_mem_addr_4kb;
+       /* map PQ group to PF */
+       STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+                    (u32)(pf_id));
+       /* set PQ sizes */
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+                    QM_PQ_SIZE_256B(pq_size));
+       /* set base address */
+       for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+            i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+               STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+                            mem_addr_4kb);
+               mem_addr_4kb += pq_mem_4kb;
+       }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on invalid weight configuration, 0 otherwise.
+ */
+static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+                               u8 port_id,
+                               u8 pf_id,
+                               u16 pf_wfq,
+                               u8 max_phys_tcs_per_port,
+                               u16 num_tx_pqs,
+                               struct init_qm_pq_params *pq_params)
+{
+       u16 i;
+       u32 inc_val;
+       /* PFs beyond MAX_NUM_PFS_BB use the MSB credit register block */
+       u32 crd_reg_offset =
+           (pf_id <
+            MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
+            QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
+       inc_val = QM_WFQ_INC_VAL(pf_wfq);
+       if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+               return -1;
+       }
+       /* initialize the WFQ credit of each VOQ used by this PF's Tx PQs */
+       for (i = 0; i < num_tx_pqs; i++) {
+               u8 voq =
+                   VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+               OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
+                                QM_WFQ_CRD_REG_SIGN_BIT);
+       }
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
+                    QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
+       return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF. Return -1 on err */
+static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
+{
+       u32 inc_val = QM_RL_INC_VAL(pf_rl);
+       if (inc_val > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+               return -1;
+       }
+       /* initialize the PF's RL credit, upper bound and increment value */
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+                    QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+                    QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+       return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the PF's VPORTs.
+ * Return -1 on invalid weight configuration, 0 otherwise.
+ */
+static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+                               u8 num_vports,
+                               struct init_qm_vport_params *vport_params)
+{
+       u8 tc, i;
+       u32 inc_val;
+       /* go over all PF VPORTs */
+       for (i = 0; i < num_vports; i++) {
+               /* a zero weight means WFQ is not configured for this VPORT */
+               if (vport_params[i].vport_wfq) {
+                       inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+                       if (inc_val > QM_WFQ_MAX_INC_VAL) {
+                               DP_NOTICE(p_hwfn, true,
+                                         "Invalid VPORT WFQ weight config");
+                               return -1;
+                       }
+                       /* program each TC that has a Tx PQ on this VPORT */
+                       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+                               u16 vport_pq_id =
+                                   vport_params[i].first_tx_pq_id[tc];
+                               if (vport_pq_id != QM_INVALID_PQ_ID) {
+                                       STORE_RT_REG(p_hwfn,
+                                                    QM_REG_WFQVPCRD_RT_OFFSET +
+                                                    vport_pq_id,
+                                                    QM_WFQ_CRD_REG_SIGN_BIT);
+                                       STORE_RT_REG(p_hwfn,
+                                               QM_REG_WFQVPWEIGHT_RT_OFFSET
+                                                    + vport_pq_id, inc_val);
+                               }
+                       }
+               }
+       }
+       return 0;
+}
+
+/* Prepare VPORT RL runtime init values for specified VPORT. Ret -1 on error. */
+static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
+                                 u8 start_vport,
+                                 u8 num_vports,
+                                 struct init_qm_vport_params *vport_params)
+{
+       u8 i, vport_id;
+       /* go over all PF VPORTs */
+       for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+               u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+               if (inc_val > QM_RL_MAX_INC_VAL) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Invalid VPORT rate-limit configuration");
+                       return -1;
+               }
+               /* initialize the VPORT's RL credit, upper bound and
+                * increment value
+                */
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+                            QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+                            QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+                            inc_val);
+       }
+       return 0;
+}
+
+/* Poll until the QM SDM command-ready register reads non-zero.
+ * Returns true when ready, false on timeout
+ * (QM_STOP_CMD_MAX_POLL_COUNT polls of QM_STOP_CMD_POLL_PERIOD_US each).
+ */
+static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt)
+{
+       u32 reg_val, i;
+       for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+            i++) {
+               OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
+               reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+       }
+       /* check if timeout while waiting for SDM command ready */
+       if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                          "Timeout waiting for QM SDM cmd ready signal\n");
+               return false;
+       }
+       return true;
+}
+
+/* Send a command to the QM SDM: wait for ready, write address and data,
+ * pulse the GO register, then wait for completion.
+ * Returns false if either wait times out.
+ */
+static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
+{
+       if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+               return false;
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+       /* pulse GO to latch the command */
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+       return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+/* Compute the ILT memory size (in 4KB units) required for the PF's QM
+ * PQs: PF Tx PQs + VF Tx PQs + the PF's "other" PQs.
+ */
+u32 ecore_qm_pf_mem_size(u8 pf_id,
+                        u32 num_pf_cids,
+                        u32 num_vf_cids,
+                        u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+{
+       u32 pf_pqs_size = QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs;
+       u32 vf_pqs_size = QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs;
+       u32 other_pqs_size =
+           QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+
+       return pf_pqs_size + vf_pqs_size + other_pqs_size;
+}
+
+/* Prepare QM runtime init values that are common to all PFs:
+ * the opportunistic credit mask and the global enable/disable state of
+ * PF/VPORT WFQ and RL, plus PBF command queue lines and BTB blocks.
+ * Return -1 on invalid port configuration, 0 otherwise.
+ */
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+                           u8 max_ports_per_engine,
+                           u8 max_phys_tcs_per_port,
+                           bool pf_rl_en,
+                           bool pf_wfq_en,
+                           bool vport_rl_en,
+                           bool vport_wfq_en,
+                           struct init_qm_port_params
+                           port_params[MAX_NUM_PORTS])
+{
+       u8 port_id;
+       /* init AFullOprtnstcCrdMask */
+       u32 mask =
+           (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+           (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+           (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+           (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+           (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+           (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+           (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+           (QM_OPPOR_PQ_EMPTY_DEF <<
+            QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+       STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+       /* check eagle workaround: the workaround TC cannot be used as a
+        * regular TC, so reject configs that need more TCs than it allows
+        */
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               if (port_params[port_id].active &&
+                   port_params[port_id].num_active_phys_tcs >
+                   EAGLE_WORKAROUND_TC &&
+                   ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Can't config 8 TCs with Eagle"
+                                 " eng1 workaround");
+                       return -1;
+               }
+       }
+       /* enable/disable PF RL */
+       ecore_enable_pf_rl(p_hwfn, pf_rl_en);
+       /* enable/disable PF WFQ */
+       ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
+       /* enable/disable VPORT RL */
+       ecore_enable_vport_rl(p_hwfn, vport_rl_en);
+       /* enable/disable VPORT WFQ */
+       ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
+       /* init PBF CMDQ line credit */
+       ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
+                                max_phys_tcs_per_port, port_params);
+       /* init BTB blocks in PBF */
+       ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
+                                max_phys_tcs_per_port, port_params);
+       return 0;
+}
+
+/* Prepare QM runtime init values for the per-PF phase.
+ * Clears the per-VPORT first-Tx-PQ table, maps the PF's "other" and Tx
+ * PQs, then programs the PF WFQ/RL and VPORT WFQ/RL runtime values.
+ *
+ * Returns 0 on success, -1 if any WFQ/RL configuration is rejected by
+ * the helper routines.
+ */
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt,
+                       u8 port_id,
+                       u8 pf_id,
+                       u8 max_phys_tcs_per_port,
+                       bool is_first_pf,
+                       u32 num_pf_cids,
+                       u32 num_vf_cids,
+                       u32 num_tids,
+                       u16 start_pq,
+                       u16 num_pf_pqs,
+                       u16 num_vf_pqs,
+                       u8 start_vport,
+                       u8 num_vports,
+                       u16 pf_wfq,
+                       u32 pf_rl,
+                       struct init_qm_pq_params *pq_params,
+                       struct init_qm_vport_params *vport_params)
+{
+       u8 tc, i;
+       /* memory consumed by this PF's non-Tx ("other") PQs, in 4KB units */
+       u32 other_mem_size_4kb =
+           QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+       /* clear first Tx PQ ID array for each VPORT */
+       for (i = 0; i < num_vports; i++)
+               for (tc = 0; tc < NUM_OF_TCS; tc++)
+                       vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+       /* map Other PQs (if any) */
+#if QM_OTHER_PQS_PER_PF > 0
+       ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
+                                  num_tids, 0);
+#endif
+       /* map Tx PQs */
+       ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
+                               max_phys_tcs_per_port, is_first_pf, num_pf_cids,
+                               num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
+                               start_vport, other_mem_size_4kb, pq_params,
+                               vport_params);
+       /* init PF WFQ - only when a non-zero weight was requested */
+       if (pf_wfq)
+               if (ecore_pf_wfq_rt_init
+                   (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
+                    num_pf_pqs + num_vf_pqs, pq_params) != 0)
+                       return -1;
+       /* init PF RL */
+       if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
+               return -1;
+       /* set VPORT WFQ */
+       if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
+               return -1;
+       /* set VPORT RL */
+       if (ecore_vport_rl_rt_init
+           (p_hwfn, start_vport, num_vports, vport_params) != 0)
+               return -1;
+       return 0;
+}
+
+/* Configure the WFQ weight of a single PF at runtime.
+ * Returns 0 on success, -1 if the requested weight maps to an invalid
+ * (zero or too large) increment value.
+ */
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+                     struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
+{
+       u32 wfq_inc = QM_WFQ_INC_VAL(pf_wfq);
+
+       /* a zero or out-of-range increment cannot be programmed */
+       if (!wfq_inc || wfq_inc > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+               return -1;
+       }
+       /* program the per-PF WFQ weight register */
+       ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, wfq_inc);
+       return 0;
+}
+
+/* Configure the rate limiter of a single PF at runtime.
+ * Returns 0 on success, -1 if the requested rate maps to an increment
+ * value larger than the HW maximum.
+ */
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
+{
+       u32 rl_inc = QM_RL_INC_VAL(pf_rl);
+       u32 reg_off = (u32)pf_id * 4;
+
+       if (rl_inc > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+               return -1;
+       }
+       /* reset the PF RL credit register before programming the new
+        * increment value
+        */
+       ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + reg_off,
+                QM_RL_CRD_REG_SIGN_BIT);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + reg_off, rl_inc);
+       return 0;
+}
+
+/* Configure the WFQ weight of a single VPORT at runtime.
+ * The weight is programmed for every TC that has a valid first Tx PQ.
+ * Returns 0 on success, -1 if the requested weight maps to an invalid
+ * (zero or too large) increment value.
+ */
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+                        struct ecore_ptt *p_ptt,
+                        u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+{
+       u32 wfq_inc = QM_WFQ_INC_VAL(vport_wfq);
+       u8 tc;
+
+       if (!wfq_inc || wfq_inc > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true,
+                         "Invalid VPORT WFQ weight configuration");
+               return -1;
+       }
+       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+               u16 pq_id = first_tx_pq_id[tc];
+
+               /* skip TCs with no Tx PQ mapped to this VPORT */
+               if (pq_id == QM_INVALID_PQ_ID)
+                       continue;
+               ecore_wr(p_hwfn, p_ptt,
+                        QM_REG_WFQVPWEIGHT + pq_id * 4, wfq_inc);
+       }
+       return 0;
+}
+
+/* Configure the rate limiter of a single VPORT at runtime.
+ * Returns 0 on success, -1 if the requested rate maps to an increment
+ * value larger than the HW maximum.
+ */
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+{
+       u32 rl_inc = QM_RL_INC_VAL(vport_rl);
+       u32 reg_off = (u32)vport_id * 4;
+
+       if (rl_inc > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true,
+                         "Invalid VPORT rate-limit configuration");
+               return -1;
+       }
+       /* reset the global RL credit register before programming the new
+        * increment value
+        */
+       ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + reg_off,
+                QM_RL_CRD_REG_SIGN_BIT);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + reg_off, rl_inc);
+       return 0;
+}
+
+/* Send QM stop (or release) commands for a contiguous range of PQs.
+ * PQs are grouped QM_STOP_PQ_MASK_WIDTH at a time into a bitmask; one
+ * HW command is issued per group (and for the final partial group).
+ * For a release command the mask is left all-zero.
+ *
+ * Returns true on success, false if any QM command failed.
+ */
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           bool is_release_cmd,
+                           bool is_tx_pq, u16 start_pq, u16 num_pqs)
+{
+       u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+       u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+       /* set command's PQ type */
+       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+       /* go over requested PQs */
+       for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+               /* set PQ bit in mask (stop command only) */
+               if (!is_release_cmd)
+                       pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+               /* if last PQ or end of PQ mask, write command */
+               if ((pq_id == last_pq) ||
+                   (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+                   (QM_STOP_PQ_MASK_WIDTH - 1))) {
+                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
+                                        pq_mask);
+                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
+                                        pq_id / QM_STOP_PQ_MASK_WIDTH);
+                       if (!ecore_send_qm_cmd
+                           (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
+                            cmd_arr[1]))
+                               return false;
+                       /* start accumulating the mask of the next group */
+                       pq_mask = 0;
+               }
+       }
+       return true;
+}
+
+/* NIG: ETS configuration constants */
+#define NIG_TX_ETS_CLIENT_OFFSET       4
+#define NIG_LB_ETS_CLIENT_OFFSET       1
+#define NIG_ETS_MIN_WFQ_BYTES          1600
+/* NIG: ETS constants - upper bound is twice the max of weight and MTU */
+#define NIG_ETS_UP_BOUND(weight, mtu) \
+(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+/* NIG: RL constants */
+#define NIG_RL_BASE_TYPE                       1       /* byte base type */
+#define NIG_RL_PERIOD                          1       /* in us */
+#define NIG_RL_PERIOD_CLK_25M          (25 * NIG_RL_PERIOD)
+#define NIG_RL_INC_VAL(rate)           (((rate) * NIG_RL_PERIOD) / 8)
+#define NIG_RL_MAX_VAL(inc_val, mtu) \
+(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+/* NIG: packet priority configuration constants */
+#define NIG_PRIORITY_MAP_TC_BITS 4
+/* Initialize NIG ETS (Enhanced Transmission Selection) arbitration.
+ * Programs the strict-priority TC map, the WFQ TC map and, for each
+ * WFQ TC, a byte weight normalized against the smallest requested
+ * weight plus an MTU-derived upper bound.
+ * is_lb selects the loopback arbiter registers instead of the Tx ones.
+ */
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt,
+                       struct init_ets_req *req, bool is_lb)
+{
+       u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
+       u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
+       u8 tc_client_offset =
+           is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
+       u32 min_weight = 0xffffffff;
+       u32 tc_weight_base_addr =
+           is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+           NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+       u32 tc_weight_addr_diff =
+           is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
+           NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
+           NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+       u32 tc_bound_base_addr =
+           is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+           NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+       u32 tc_bound_addr_diff =
+           is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
+           NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+           NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
+           NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+       for (tc = 0; tc < num_tc; tc++) {
+               struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+               /* update SP map */
+               if (tc_req->use_sp)
+                       sp_tc_map |= (1 << tc);
+               if (tc_req->use_wfq) {
+                       /* update WFQ map */
+                       wfq_tc_map |= (1 << tc);
+                       /* find minimal weight */
+                       if (tc_req->weight < min_weight)
+                               min_weight = tc_req->weight;
+               }
+       }
+       /* write SP map */
+       ecore_wr(p_hwfn, p_ptt,
+                is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
+                NIG_REG_TX_ARB_CLIENT_IS_STRICT,
+                (sp_tc_map << tc_client_offset));
+       /* write WFQ map */
+       ecore_wr(p_hwfn, p_ptt,
+                is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
+                NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+                (wfq_tc_map << tc_client_offset));
+       /* write WFQ weights */
+       for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
+               struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+               if (tc_req->use_wfq) {
+                       /* translate weight to bytes */
+                       u32 byte_weight =
+                           (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+                           min_weight;
+                       /* write WFQ weight */
+                       ecore_wr(p_hwfn, p_ptt,
+                                tc_weight_base_addr +
+                                tc_weight_addr_diff * tc_client_offset,
+                                byte_weight);
+                       /* write WFQ upper bound */
+                       ecore_wr(p_hwfn, p_ptt,
+                                tc_bound_base_addr +
+                                tc_bound_addr_diff * tc_client_offset,
+                                NIG_ETS_UP_BOUND(byte_weight, req->mtu));
+               }
+       }
+}
+
+/* Initialize the NIG loopback rate limiters: the global MAC+LB RL, the
+ * global LB-only RL and the per-TC LB RLs. Each RL is first disabled,
+ * then configured and re-enabled only when its requested rate is
+ * non-zero.
+ */
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+                         struct ecore_ptt *p_ptt,
+                         struct init_nig_lb_rl_req *req)
+{
+       u8 tc;
+       u32 ctrl, inc_val, reg_offset;
+       /* disable global MAC+LB RL */
+       ctrl =
+           NIG_RL_BASE_TYPE <<
+           NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+       /* configure and enable global MAC+LB RL */
+       if (req->lb_mac_rate) {
+               /* configure  */
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
+                        NIG_RL_PERIOD_CLK_25M);
+               inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
+                        inc_val);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
+                        NIG_RL_MAX_VAL(inc_val, req->mtu));
+               /* enable */
+               ctrl |=
+                   1 <<
+                   NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+       }
+       /* disable global LB-only RL */
+       ctrl =
+           NIG_RL_BASE_TYPE <<
+           NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+       /* configure and enable global LB-only RL */
+       if (req->lb_rate) {
+               /* configure  */
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
+                        NIG_RL_PERIOD_CLK_25M);
+               inc_val = NIG_RL_INC_VAL(req->lb_rate);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
+                        inc_val);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
+                        NIG_RL_MAX_VAL(inc_val, req->mtu));
+               /* enable */
+               ctrl |=
+                   1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+       }
+       /* per-TC RLs (each TC's registers are 4 bytes apart) */
+       for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
+            tc++, reg_offset += 4) {
+               /* disable TC RL */
+               ctrl =
+                   NIG_RL_BASE_TYPE <<
+               NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
+               /* configure and enable TC RL */
+               if (req->tc_rate[tc]) {
+                       /* configure */
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
+                                reg_offset, NIG_RL_PERIOD_CLK_25M);
+                       inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
+                                reg_offset, inc_val);
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
+                                reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
+                       /* enable */
+                       ctrl |=
+                           1 <<
+               NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
+                                ctrl);
+               }
+       }
+}
+
+/* Initialize the NIG packet-priority to TC mapping.
+ * Builds a packed priority->TC field map and, per TC, a bitmap of the
+ * VLAN priorities assigned to it, then programs both directions into
+ * the NIG registers.
+ */
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt,
+                              struct init_nig_pri_tc_map_req *req)
+{
+       u8 prio, tc_id, tc;
+       u32 prio_to_tc = 0;
+       u8 tc_to_prio[NUM_OF_PHYS_TCS] = { 0 };
+
+       for (prio = 0; prio < NUM_OF_VLAN_PRIORITIES; prio++) {
+               if (!req->pri[prio].valid)
+                       continue;
+               tc_id = req->pri[prio].tc_id;
+               prio_to_tc |= tc_id << (prio * NIG_PRIORITY_MAP_TC_BITS);
+               tc_to_prio[tc_id] |= 1 << prio;
+       }
+       /* write priority -> TC mask */
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, prio_to_tc);
+       /* write TC -> priority mask (Tx and Rx register sets) */
+       for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
+                        tc_to_prio[tc]);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
+                        tc_to_prio[tc]);
+       }
+}
+
+/* PRS: ETS configuration constants */
+#define PRS_ETS_MIN_WFQ_BYTES                  1600
+/* upper bound is twice the max of weight and MTU */
+#define PRS_ETS_UP_BOUND(weight, mtu) \
+(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+/* Initialize PRS ETS arbitration.
+ * Same scheme as the NIG ETS init: programs the strict-priority map,
+ * the WFQ map and, for each WFQ TC, a byte weight normalized against
+ * the smallest requested weight plus an MTU-derived upper bound.
+ */
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct init_ets_req *req)
+{
+       u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
+       u32 min_weight = 0xffffffff;
+       u32 tc_weight_addr_diff =
+           PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
+       u32 tc_bound_addr_diff =
+           PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
+           PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+               struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+               /* update SP map */
+               if (tc_req->use_sp)
+                       sp_tc_map |= (1 << tc);
+               if (tc_req->use_wfq) {
+                       /* update WFQ map */
+                       wfq_tc_map |= (1 << tc);
+                       /* find minimal weight */
+                       if (tc_req->weight < min_weight)
+                               min_weight = tc_req->weight;
+               }
+       }
+       /* write SP map */
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
+       /* write WFQ map */
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
+                wfq_tc_map);
+       /* write WFQ weights */
+       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+               struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+               if (tc_req->use_wfq) {
+                       /* translate weight to bytes */
+                       u32 byte_weight =
+                           (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+                           min_weight;
+                       /* write WFQ weight */
+                       ecore_wr(p_hwfn, p_ptt,
+                                PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
+                                tc * tc_weight_addr_diff, byte_weight);
+                       /* write WFQ upper bound */
+                       ecore_wr(p_hwfn, p_ptt,
+                                PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
+                                tc * tc_bound_addr_diff,
+                                PRS_ETS_UP_BOUND(byte_weight, req->mtu));
+               }
+       }
+}
+
+/* BRB: RAM configuration constants */
+#define BRB_TOTAL_RAM_BLOCKS_BB        4800
+#define BRB_TOTAL_RAM_BLOCKS_K2        5632
+#define BRB_BLOCK_SIZE                 128     /* in bytes */
+#define BRB_MIN_BLOCKS_PER_TC  9
+#define BRB_HYST_BYTES                 10240
+#define BRB_HYST_BLOCKS                        (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
+/* Initialize BRB RAM allocation per port/TC.
+ * Splits the total BRB RAM evenly between the ports that have active
+ * TCs, reserves a guaranteed area per active TC, and programs per-TC
+ * full/pause XON/XOFF thresholds (both LB and main register sets).
+ * Fix vs. original: guard the blocks-per-port division against zero
+ * active ports (previously undefined behavior).
+ * NOTE: temporary big RAM allocation - should be updated.
+ */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
+{
+       u8 port, active_ports = 0;
+       u32 active_port_blocks, reg_offset = 0;
+       u32 tc_headroom_blocks =
+           (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
+       u32 min_pkt_size_blocks =
+           (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
+       u32 total_blocks =
+           ECORE_IS_K2(p_hwfn->
+                       p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
+           BRB_TOTAL_RAM_BLOCKS_BB;
+       /* find number of active ports */
+       for (port = 0; port < MAX_NUM_PORTS; port++)
+               if (req->num_active_tcs[port])
+                       active_ports++;
+       /* avoid division by zero when no port has active TCs; all
+        * per-port sizes collapse to zero in that case anyway
+        */
+       active_port_blocks = active_ports ?
+           (u32)(total_blocks / active_ports) : 0;
+       for (port = 0; port < req->max_ports_per_engine; port++) {
+               /* calculate per-port sizes */
+               u32 tc_guaranteed_blocks =
+                   (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
+               u32 port_blocks =
+                   req->num_active_tcs[port] ? active_port_blocks : 0;
+               u32 port_guaranteed_blocks =
+                   req->num_active_tcs[port] * tc_guaranteed_blocks;
+               u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
+               u32 full_xoff_th =
+                   req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
+               u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
+               u32 pause_xoff_th = tc_headroom_blocks;
+               u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+               u8 tc;
+               /* init total size per port */
+               ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
+                        port_blocks);
+               /* init shared size per port */
+               ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
+                        port_shared_blocks);
+               for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
+                       /* clear init values for non-active TCs */
+                       if (tc == req->num_active_tcs[port]) {
+                               tc_guaranteed_blocks = 0;
+                               full_xoff_th = 0;
+                               full_xon_th = 0;
+                               pause_xoff_th = 0;
+                               pause_xon_th = 0;
+                       }
+                       /* init guaranteed size per TC */
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_TC_GUARANTIED_0 + reg_offset,
+                                tc_guaranteed_blocks);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
+                                BRB_HYST_BLOCKS);
+                       /* init LB and main XON/XOFF thresholds per TC */
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
+                                reg_offset, full_xoff_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
+                                reg_offset, full_xon_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
+                                reg_offset, pause_xoff_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
+                                reg_offset, pause_xon_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
+                                reg_offset, full_xoff_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
+                                reg_offset, full_xon_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
+                                reg_offset, pause_xoff_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
+                                reg_offset, pause_xon_th);
+               }
+       }
+}
+
+/* In MF mode, should be called once per engine to set the EtherType
+ * of the OuterTag; mirrors the value into the PRS, NIG and PBF
+ * runtime registers.
+ */
+void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, u32 eth_type)
+{
+       /* update PRS register */
+       STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+       /* update NIG register */
+       STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+       /* update PBF register */
+       STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+}
+
+/* In MF mode, should be called once per port to set the EtherType
+ * of the OuterTag (only the DORQ runtime register is per-port).
+ */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt, u32 eth_type)
+{
+       /* update DORQ register */
+       STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, eth_type);
+}
+
+/* set or clear a single tunnel-type enable bit at @offset in @var */
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
+/* PRS output format programmed when any tunnel encapsulation is
+ * enabled (raw FW-defined constant)
+ */
+#define PRS_ETH_TUNN_FIC_FORMAT        -188897008
+/* Program the VXLAN UDP destination port into every HW block that
+ * parses or builds tunnel headers (PRS, NIG and PBF).
+ */
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt, u16 dest_port)
+{
+       static const u32 vxlan_port_regs[] = {
+               PRS_REG_VXLAN_PORT,     /* parser */
+               NIG_REG_VXLAN_PORT,     /* NIG */
+               PBF_REG_VXLAN_PORT,     /* PBF */
+       };
+       u8 i;
+
+       for (i = 0; i < sizeof(vxlan_port_regs) / sizeof(vxlan_port_regs[0]);
+            i++)
+               ecore_wr(p_hwfn, p_ptt, vxlan_port_regs[i], dest_port);
+}
+
+/* Enable or disable VXLAN tunnel recognition in PRS, NIG and DORQ.
+ * When any encapsulation type remains enabled in PRS, the FIC output
+ * format register is (re)programmed as well.
+ */
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt, bool vxlan_enable)
+{
+       u32 reg_val;
+       /* update PRS register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                          PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
+                          vxlan_enable);
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       /* any encap type still enabled -> program output format */
+       if (reg_val) {
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                        PRS_ETH_TUNN_FIC_FORMAT);
+       }
+       /* update NIG register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                                  NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
+                                  vxlan_enable);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+       /* update DORQ register (EDPM tunnel support) */
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+                vxlan_enable ? 1 : 0);
+}
+
+/* Enable or disable ETH-over-GRE and IP-over-GRE tunnel recognition in
+ * PRS, NIG and DORQ. When any encapsulation type remains enabled in
+ * PRS, the FIC output format register is (re)programmed as well.
+ */
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+                         struct ecore_ptt *p_ptt,
+                         bool eth_gre_enable, bool ip_gre_enable)
+{
+       u32 reg_val;
+       /* update PRS register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
+                  eth_gre_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
+                  ip_gre_enable);
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       /* any encap type still enabled -> program output format */
+       if (reg_val) {
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                        PRS_ETH_TUNN_FIC_FORMAT);
+       }
+       /* update NIG register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
+                  eth_gre_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
+                  ip_gre_enable);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+       /* update DORQ registers (EDPM tunnel support) */
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+                eth_gre_enable ? 1 : 0);
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+                ip_gre_enable ? 1 : 0);
+}
+
+/* Program the GENEVE (NGE) UDP destination port into every HW block
+ * that parses or builds tunnel headers (PRS, NIG and PBF).
+ */
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt, u16 dest_port)
+{
+       static const u32 nge_port_regs[] = {
+               PRS_REG_NGE_PORT,       /* parser */
+               NIG_REG_NGE_PORT,       /* NIG */
+               PBF_REG_NGE_PORT,       /* PBF */
+       };
+       u8 i;
+
+       /* geneve tunnel not supported in BB_A0 */
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+               return;
+       for (i = 0; i < sizeof(nge_port_regs) / sizeof(nge_port_regs[0]); i++)
+               ecore_wr(p_hwfn, p_ptt, nge_port_regs[i], dest_port);
+}
+
+/* Enable or disable ETH-over-GENEVE and IP-over-GENEVE tunnel
+ * recognition in PRS, NIG, PBF and (where supported) DORQ.
+ * No-op on BB_A0 (geneve unsupported); DORQ EDPM programming is
+ * skipped on BB_B0 (EDPM with geneve unsupported there).
+ */
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt,
+                            bool eth_geneve_enable, bool ip_geneve_enable)
+{
+       u32 reg_val;
+       /* geneve tunnel not supported in BB_A0 */
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+               return;
+       /* update PRS register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
+                  eth_geneve_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
+                  ip_geneve_enable);
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       /* any encap type still enabled -> program output format */
+       if (reg_val) {
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                        PRS_ETH_TUNN_FIC_FORMAT);
+       }
+       /* update NIG register */
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+                eth_geneve_enable ? 1 : 0);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
+                ip_geneve_enable ? 1 : 0);
+       /* comp ver */
+       reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+       ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+       /* EDPM with geneve tunnel not supported in BB_B0 */
+       if (ECORE_IS_BB_B0(p_hwfn->p_dev))
+               return;
+       /* update DORQ registers */
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+                eth_geneve_enable ? 1 : 0);
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+                ip_geneve_enable ? 1 : 0);
+}
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
new file mode 100644 (file)
index 0000000..5280cd7
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _INIT_FW_FUNCS_H
+#define _INIT_FW_FUNCS_H
+/* forward declarations */
+struct init_qm_pq_params;
+/**
+ * @brief ecore_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id                        - physical function ID
+ * @param num_pf_cids  - number of connections used by this PF
+ * @param num_vf_cids  - number of connections used by VFs of this PF
+ * @param num_tids             - number of tasks used by this PF
+ * @param num_pf_pqs   - number of PQs used by this PF
+ * @param num_vf_pqs   - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 ecore_qm_pf_mem_size(u8 pf_id,
+                        u32 num_pf_cids,
+                        u32 num_vf_cids,
+                        u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
+/**
+ * @brief ecore_qm_common_rt_init -
+ * Prepare QM runtime init values for the engine phase
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param max_phys_tcs_per_port        - max number of physical TCs per port in HW
+ * @param pf_rl_en                             - enable per-PF rate limiters
+ * @param pf_wfq_en                            - enable per-PF WFQ
+ * @param vport_rl_en                  - enable per-VPORT rate limiters
+ * @param vport_wfq_en                 - enable per-VPORT WFQ
+ * @param port_params- array of size MAX_NUM_PORTS with parameters for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+                           u8 max_ports_per_engine,
+                           u8 max_phys_tcs_per_port,
+                           bool pf_rl_en,
+                           bool pf_wfq_en,
+                           bool vport_rl_en,
+                           bool vport_wfq_en,
+                           struct init_qm_port_params
+                           port_params[MAX_NUM_PORTS]);
+
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt,
+                       u8 port_id,
+                       u8 pf_id,
+                       u8 max_phys_tcs_per_port,
+                       bool is_first_pf,
+                       u32 num_pf_cids,
+                       u32 num_vf_cids,
+                       u32 num_tids,
+                       u16 start_pq,
+                       u16 num_pf_pqs,
+                       u16 num_vf_pqs,
+                       u8 start_vport,
+                       u8 num_vports,
+                       u16 pf_wfq,
+                       u32 pf_rl,
+                       struct init_qm_pq_params *pq_params,
+                       struct init_qm_vport_params *vport_params);
+/**
+ * @brief ecore_init_pf_wfq  Initializes the WFQ weight of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt                - ptt window used for writing the registers
+ * @param pf_id                - PF ID
+ * @param pf_wfq       - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+                     struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
+/**
+ * @brief ecore_init_pf_rl  Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt        - ptt window used for writing the registers
+ * @param pf_id        - PF ID
+ * @param pf_rl        - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl);
+/**
+ * @brief ecore_init_vport_wfq Initializes the WFQ weight of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt                        - ptt window used for writing the registers
+ * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
+ *                        with the VPORT for each TC. This array is filled by
+ *                        ecore_qm_pf_rt_init
+ * @param vport_wfq            - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+                        struct ecore_ptt *p_ptt,
+                        u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
+/**
+ * @brief ecore_init_vport_rl  Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt                - ptt window used for writing the registers
+ * @param vport_id     - VPORT ID
+ * @param vport_rl     - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+/**
+ * @brief ecore_send_qm_stop_cmd  Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt                 - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq       - true for Tx PQs, false for Other PQs.
+ * @param start_pq       - first PQ ID to stop
+ * @param num_pqs        - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if timeout occurred while
+ * waiting for QM command done.
+ */
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           bool is_release_cmd,
+                           bool is_tx_pq, u16 start_pq, u16 num_pqs);
+/**
+ * @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - the NIG ETS initialization requirements.
+ * @param is_lb        - if set, the loopback port arbiter is initialized, otherwise
+ *               the physical port arbiter is initialized. The pure-LB TC
+ *               requirements are ignored when is_lb is cleared.
+ */
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt,
+                       struct init_ets_req *req, bool is_lb);
+/**
+ * @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
+ *
+ * Based on global and per-TC rate requirements
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - the NIG LB RLs initialization requirements.
+ */
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+                         struct ecore_ptt *p_ptt,
+                         struct init_nig_lb_rl_req *req);
+/**
+ * @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
+ *
+ * Assumes valid arguments.
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - required mapping from priorities to TCs.
+ */
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt,
+                              struct init_nig_pri_tc_map_req *req);
+/**
+ * @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - the PRS ETS initialization requirements.
+ */
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct init_ets_req *req);
+/**
+ * @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - the BRB RAM initialization requirements.
+ */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct init_brb_ram_req *req);
+/**
+ * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf
+ * and llh ethType Regs to  input ethType
+ * Should be called once per engine if the engine is in BD mode.
+ *
+ * @param p_ptt    - ptt window used for writing the registers.
+ * @param ethType - etherType to configure
+ */
+void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, u32 eth_type);
+/**
+ * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs
+ * to input ethType
+ * Should be called once per port.
+ *
+ * @param p_ptt    - ptt window used for writing the registers.
+ * @param ethType - etherType to configure
+ */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt, u32 eth_type);
+/**
+ * @brief ecore_set_vxlan_dest_port - init vxlan tunnel destination udp port
+ *
+ * @param p_ptt     - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt, u16 dest_port);
+/**
+ * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt, bool vxlan_enable);
+/**
+ * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt          - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable  - IP GRE enable flag.
+ */
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+                         struct ecore_ptt *p_ptt,
+                         bool eth_gre_enable, bool ip_gre_enable);
+/**
+ * @brief ecore_set_geneve_dest_port - init geneve tunnel destination udp port
+ *
+ * @param p_ptt     - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt, u16 dest_port);
+/**
+ * @brief ecore_set_geneve_enable - enable or disable GENEVE tunnel in HW
+ *
+ * @param p_ptt             - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable  - IP GENEVE enable flag.
+ */
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt,
+                            bool eth_geneve_enable, bool ip_geneve_enable);
+#endif
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
new file mode 100644 (file)
index 0000000..eeaabb6
--- /dev/null
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/* include the precompiled configuration values - only once */
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_status.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_fw_funcs.h"
+
+#include "ecore_iro_values.h"
+#include "ecore_gtt_values.h"
+#include "reg_addr.h"
+#include "ecore_init_ops.h"
+
+#define ECORE_INIT_MAX_POLL_COUNT      100
+#define ECORE_INIT_POLL_PERIOD_US      500
+
+/* Point p_dev->iro_arr at the compiled-in iro_arr table
+ * (provided by ecore_iro_values.h).
+ */
+void ecore_init_iro_array(struct ecore_dev *p_dev)
+{
+       p_dev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+/* Mark every runtime-init entry as not-yet-configured. */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
+{
+       bool *p_valid = p_hwfn->rt_data.b_valid;
+       int idx;
+
+       for (idx = 0; idx < RUNTIME_ARRAY_SIZE; idx++)
+               p_valid[idx] = false;
+}
+
+/* Record a single runtime-init value and flag its slot as valid. */
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
+{
+       struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
+
+       rt_data->init_val[rt_offset] = val;
+       rt_data->b_valid[rt_offset] = true;
+}
+
+/* Record an aggregate of 32-bit runtime-init values starting at rt_offset;
+ * 'size' is given in bytes and is converted to a dword count.
+ */
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+                            u32 rt_offset, u32 *p_val, osal_size_t size)
+{
+       struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
+       osal_size_t num_vals = size / sizeof(u32);
+       osal_size_t idx;
+
+       for (idx = 0; idx < num_vals; idx++) {
+               rt_data->init_val[rt_offset + idx] = p_val[idx];
+               rt_data->b_valid[rt_offset + idx] = true;
+       }
+}
+
+/* Flush the valid entries of the runtime-init shadow array in
+ * [rt_offset, rt_offset + size) to chip address 'addr'.  When
+ * b_must_dmae is set (wide-bus destination), contiguous runs of valid
+ * entries are pushed with a single DMAE transaction each; otherwise
+ * every value is written directly through the GRC window.
+ */
+static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 addr,
+                                         u16 rt_offset,
+                                         u16 size, bool b_must_dmae)
+{
+       u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+       bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u16 i, segment;
+
+       /* Since not all RT entries are initialized, go over the RT and
+        * for each segment of initialized values use DMA.
+        */
+       for (i = 0; i < size; i++) {
+               if (!p_valid[i])
+                       continue;
+
+               /* In case there isn't any wide-bus configuration here,
+                * simply write the data instead of using dmae.
+                */
+               if (!b_must_dmae) {
+                       ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+                       continue;
+               }
+
+               /* Start of a new segment */
+               for (segment = 1; i + segment < size; segment++)
+                       if (!p_valid[i + segment])
+                               break;
+
+               rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                        (osal_uintptr_t)(p_init_val + i),
+                                        addr + (i << 2), segment, 0);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+
+               /* Jump over the entire segment, including invalid entry
+                * (the loop's own i++ then steps past it).
+                */
+               i += segment;
+       }
+
+       return rc;
+}
+
+/**
+ * @brief ecore_init_alloc - allocate the runtime-init shadow arrays
+ *
+ * Allocates the b_valid[] and init_val[] arrays (RUNTIME_ARRAY_SIZE
+ * entries each) used to stage runtime configuration values before they
+ * are flushed to the chip by ecore_init_rt().
+ *
+ * @param p_hwfn
+ *
+ * @return ECORE_SUCCESS, or ECORE_NOMEM if either allocation fails.
+ */
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
+
+       rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                      sizeof(bool) * RUNTIME_ARRAY_SIZE);
+       if (!rt_data->b_valid)
+               return ECORE_NOMEM;
+
+       rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                       sizeof(u32) * RUNTIME_ARRAY_SIZE);
+       if (!rt_data->init_val) {
+               OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
+               /* Clear the stale pointer so a subsequent
+                * ecore_init_free() cannot double-free it.
+                */
+               rt_data->b_valid = OSAL_NULL;
+               return ECORE_NOMEM;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Release the runtime-init shadow arrays and clear the stale pointers so
+ * that calling this function again (or after a failed init) is harmless.
+ */
+void ecore_init_free(struct ecore_hwfn *p_hwfn)
+{
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
+       p_hwfn->rt_data.init_val = OSAL_NULL;
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
+       p_hwfn->rt_data.b_valid = OSAL_NULL;
+}
+
+/* Copy 'size' dwords from p_buf[dmae_data_offset] onward to chip address
+ * 'addr'.  Short transfers (size < 16) that do not require wide-bus
+ * access - and, on non-ASIC builds, any transfer on a slow (emul/FPGA)
+ * chip - are written dword-by-dword over GRC; everything else goes
+ * through a single DMAE transaction.
+ */
+static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt,
+                                                 u32 addr,
+                                                 u32 dmae_data_offset,
+                                                 u32 size, const u32 *p_buf,
+                                                 bool b_must_dmae,
+                                                 bool b_can_dmae)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* Perform DMAE only for lengthy enough sections or for wide-bus */
+#ifndef ASIC_ONLY
+       if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
+           !b_can_dmae || (!b_must_dmae && (size < 16))) {
+#else
+       if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+#endif
+               const u32 *data = p_buf + dmae_data_offset;
+               u32 i;
+
+               for (i = 0; i < size; i++)
+                       ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+       } else {
+               rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                        (osal_uintptr_t)(p_buf +
+                                                          dmae_data_offset),
+                                        addr, size, 0);
+       }
+
+       return rc;
+}
+
+/* DMAE 'fill_count' dwords of zeros to chip address 'addr', using the
+ * RW_REPL_SRC flag so the single host buffer is replicated at the
+ * destination.
+ * NOTE(review): the 'fill' argument is ignored - only zero-fill is
+ * implemented here, and the sole caller (ecore_init_cmd_wr) passes 0.
+ * NOTE(review): zero_buffer has static storage, so this function is not
+ * reentrant - confirm callers are serialized.
+ */
+static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                u32 addr, u32 fill,
+                                                u32 fill_count)
+{
+       static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+       OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+       return ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                  (osal_uintptr_t)&zero_buffer[0],
+                                  addr, fill_count,
+                                  ECORE_DMAE_FLAG_RW_REPL_SRC);
+}
+
+/* Write 'fill' into 'fill_count' consecutive 32-bit registers starting at
+ * 'addr', one GRC write per dword.
+ */
+static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           u32 addr, u32 fill, u32 fill_count)
+{
+       u32 cur_addr = addr;
+       u32 written;
+
+       for (written = 0; written < fill_count; written++) {
+               ecore_wr(p_hwfn, p_ptt, cur_addr, fill);
+               cur_addr += sizeof(u32);
+       }
+}
+
+/* Execute an init write op whose source is an array inside the fw-data
+ * blob.  The array header at args.array_offset selects the layout:
+ * zipped (decompress into unzip_buf, then write), pattern (write the
+ * pattern 'repetitions' times at increasing addresses) or standard
+ * (write the dwords as-is).
+ */
+static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                struct init_write_op *cmd,
+                                                bool b_must_dmae,
+                                                bool b_can_dmae)
+{
+#ifdef CONFIG_ECORE_ZIPPED_FW
+       u32 offset, output_len, input_len, max_size;
+#endif
+       u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       union init_array_hdr *hdr;
+       const u32 *array_data;
+       u32 size, addr, data;
+
+       array_data = p_dev->fw_data->arr_data;
+       data = OSAL_LE32_TO_CPU(cmd->data);
+       addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+
+       /* The array header is the first dword; payload follows it */
+       hdr = (union init_array_hdr *)
+               (uintptr_t)(array_data + dmae_array_offset);
+       data = OSAL_LE32_TO_CPU(hdr->raw.data);
+       switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+       case INIT_ARR_ZIPPED:
+#ifdef CONFIG_ECORE_ZIPPED_FW
+               offset = dmae_array_offset + 1;
+               input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+               max_size = MAX_ZIPPED_SIZE * 4;
+               OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);
+
+               output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
+                               (u8 *)(uintptr_t)&array_data[offset],
+                               max_size,
+                               (u8 *)p_hwfn->unzip_buf);
+               if (output_len) {
+                       rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+                                                  output_len,
+                                                  p_hwfn->unzip_buf,
+                                                  b_must_dmae, b_can_dmae);
+               } else {
+                       DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
+                       rc = ECORE_INVAL;
+               }
+#else
+               DP_NOTICE(p_hwfn, true,
+                         "Using zipped firmware without config enabled\n");
+               rc = ECORE_INVAL;
+#endif
+               break;
+       case INIT_ARR_PATTERN:
+               {
+                       u32 repeats = GET_FIELD(data,
+                                       INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+                       u32 i;
+
+                       size = GET_FIELD(data,
+                                        INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+                       /* Replay the same pattern at consecutive addresses */
+                       for (i = 0; i < repeats; i++, addr += size << 2) {
+                               rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+                                                          dmae_array_offset +
+                                                          1, size, array_data,
+                                                          b_must_dmae,
+                                                          b_can_dmae);
+                               if (rc)
+                                       break;
+                       }
+                       break;
+               }
+       case INIT_ARR_STANDARD:
+               size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+               rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+                                          dmae_array_offset + 1,
+                                          size, array_data,
+                                          b_must_dmae, b_can_dmae);
+               break;
+       }
+
+       return rc;
+}
+
+/* init_ops write command */
+static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             struct init_write_op *p_cmd,
+                                             bool b_can_dmae)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       bool b_must_dmae;
+       u32 addr, data;
+
+       data = OSAL_LE32_TO_CPU(p_cmd->data);
+       b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+       addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+
+       /* Sanitize */
+       if (b_must_dmae && !b_can_dmae) {
+               DP_NOTICE(p_hwfn, true,
+                         "Need to write to %08x for Wide-bus but DMAE isn't"
+                         " allowed\n",
+                         addr);
+               return ECORE_INVAL;
+       }
+
+       switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+       case INIT_SRC_INLINE:
+               data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
+               ecore_wr(p_hwfn, p_ptt, addr, data);
+               break;
+       case INIT_SRC_ZEROS:
+               data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
+               if (b_must_dmae || (b_can_dmae && (data >= 64)))
+                       rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+               else
+                       ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
+               break;
+       case INIT_SRC_ARRAY:
+               rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
+                                         b_must_dmae, b_can_dmae);
+               break;
+       case INIT_SRC_RUNTIME:
+               ecore_init_rt(p_hwfn, p_ptt, addr,
+                             OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
+                             OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
+                             b_must_dmae);
+               break;
+       }
+
+       return rc;
+}
+
+/* Poll comparator: the read value must equal the expected value exactly. */
+static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
+{
+       return val == expected_val;
+}
+
+/* Poll comparator: all expected bits must be set in the read value. */
+static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
+{
+       u32 masked = val & expected_val;
+
+       return masked == expected_val;
+}
+
+/* Poll comparator: at least one bit set in either value or expectation. */
+static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
+{
+       return (val | expected_val) != 0;
+}
+
+/* init_ops read/poll commands */
+static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt, struct init_read_op *cmd)
+{
+       bool (*comp_check)(u32 val, u32 expected_val);
+       u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
+       u32 data, addr, poll;
+       int i;
+
+       data = OSAL_LE32_TO_CPU(cmd->op_data);
+       addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+       poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               delay *= 100;
+#endif
+
+       val = ecore_rd(p_hwfn, p_ptt, addr);
+
+       if (poll == INIT_POLL_NONE)
+               return;
+
+       switch (poll) {
+       case INIT_POLL_EQ:
+               comp_check = comp_eq;
+               break;
+       case INIT_POLL_OR:
+               comp_check = comp_or;
+               break;
+       case INIT_POLL_AND:
+               comp_check = comp_and;
+               break;
+       default:
+               DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+                      cmd->op_data);
+               return;
+       }
+
+       data = OSAL_LE32_TO_CPU(cmd->expected_val);
+       for (i = 0;
+            i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
+               OSAL_UDELAY(delay);
+               val = ecore_rd(p_hwfn, p_ptt, addr);
+       }
+
+       if (i == ECORE_INIT_MAX_POLL_COUNT)
+               DP_ERR(p_hwfn,
+                      "Timeout when polling reg: 0x%08x [ Waiting-for: %08x"
+                      " Got: %08x (comparsion %08x)]\n",
+                      addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
+                      OSAL_LE32_TO_CPU(cmd->op_data));
+}
+
+/* init_ops callbacks entry point */
+static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             struct init_callback_op *p_cmd)
+{
+       DP_NOTICE(p_hwfn, true,
+                 "Currently init values have no need of callbacks\n");
+}
+
+/* Recursively evaluate the prefix-encoded modes expression tree starting
+ * at *p_offset against the active 'modes' bitmap; returns 1 on match,
+ * 0 otherwise.  Every call consumes tree bytes by advancing *p_offset,
+ * so the two operand evaluations below are deliberately kept as separate,
+ * sequenced statements (their order matters).
+ */
+static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
+                                   u16 *p_offset, int modes)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       const u8 *modes_tree_buf;
+       u8 arg1, arg2, tree_val;
+
+       modes_tree_buf = p_dev->fw_data->modes_tree_buf;
+       tree_val = modes_tree_buf[(*p_offset)++];
+       switch (tree_val) {
+       case INIT_MODE_OP_NOT:
+               return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
+       case INIT_MODE_OP_OR:
+               arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               return arg1 | arg2;
+       case INIT_MODE_OP_AND:
+               arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               return arg1 & arg2;
+       default:
+               /* Leaf: values past MAX_INIT_MODE_OPS encode a mode bit */
+               tree_val -= MAX_INIT_MODE_OPS;
+               return (modes & (1 << tree_val)) ? 1 : 0;
+       }
+}
+
+/* Evaluate an IF_MODE op's condition; return the number of subsequent init
+ * ops to skip (0 when the active modes satisfy the condition).
+ */
+static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
+                              struct init_if_mode_op *p_cmd, int modes)
+{
+       u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);
+
+       if (!ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
+               return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
+                                INIT_IF_MODE_OP_CMD_OFFSET);
+
+       return 0;
+}
+
+/* Evaluate an IF_PHASE op's condition; return the number of subsequent
+ * init ops to skip (0 when the op applies to the current phase/phase_id).
+ */
+static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
+                               struct init_if_phase_op *p_cmd,
+                               u32 phase, u32 phase_id)
+{
+       u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
+       bool phase_matches;
+
+       phase_matches =
+           GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+           (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+            GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id);
+
+       if (phase_matches)
+               return 0;
+
+       return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
+                        INIT_IF_PHASE_OP_CMD_OFFSET);
+}
+
+/* Run the init sequence for the given phase/phase_id: walk the init-ops
+ * array from fw_data and execute write/read/delay/callback ops, while
+ * IF_MODE/IF_PHASE ops skip ranges of ops that do not apply.  Stops on
+ * the first failing op and returns its status.
+ */
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   int phase, int phase_id, int modes)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 cmd_num, num_init_ops;
+       union init_op *init_ops;
+       bool b_dmae = false;
+
+       num_init_ops = p_dev->fw_data->init_ops_size;
+       init_ops = p_dev->fw_data->init_ops;
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+       /* Scratch buffer for decompressing zipped init arrays */
+       p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+                                       MAX_ZIPPED_SIZE * 4);
+       if (!p_hwfn->unzip_buf) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
+               return ECORE_NOMEM;
+       }
+#endif
+
+       for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+               union init_op *cmd = &init_ops[cmd_num];
+               u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);
+
+               switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+               case INIT_OP_WRITE:
+                       rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+                                              b_dmae);
+                       break;
+
+               case INIT_OP_READ:
+                       ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+                       break;
+
+               case INIT_OP_IF_MODE:
+                       cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
+                                                      modes);
+                       break;
+               case INIT_OP_IF_PHASE:
+                       cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
+                                                       phase, phase_id);
+                       /* IF_PHASE also latches whether DMAE may be used for
+                        * the ops that follow it.
+                        */
+                       b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+                       break;
+               case INIT_OP_DELAY:
+                       /* ecore_init_run is always invoked from
+                        * sleep-able context
+                        */
+                       OSAL_UDELAY(cmd->delay.delay);
+                       break;
+
+               case INIT_OP_CALLBACK:
+                       ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+                       break;
+               }
+
+               if (rc)
+                       break;
+       }
+#ifdef CONFIG_ECORE_ZIPPED_FW
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
+#endif
+       return rc;
+}
+
+/* Program the fixed global GTT windows from the pxp_global_win table.
+ * On non-ASIC platforms (FPGA/emulation), first trigger the PGLUE
+ * PTT/GTT initialization and poll for its completion, since the MFW
+ * performs that step on real silicon.
+ */
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
+{
+       u32 gtt_base;
+       u32 i;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               /* This is done by MFW on ASIC; regardless, this should only
+                * be done once per chip [i.e., common]. Implementation is
+                * not too bright, but it should work on the simple FPGA/EMUL
+                * scenarios.
+                */
+               /* NOTE(review): 'initialized' is a function local, so the
+                * guard below is always taken; it looks like it was meant
+                * to be static (once per chip) - confirm.
+                */
+               bool initialized = false; /* @DPDK */
+               int poll_cnt = 500;
+               u32 val;
+
+               /* initialize PTT/GTT (poll for completion) */
+               if (!initialized) {
+                       ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                                PGLUE_B_REG_START_INIT_PTT_GTT, 1);
+                       initialized = true;
+               }
+
+               do {
+                       /* ptt might be overrided by HW until this is done */
+                       OSAL_UDELAY(10);
+                       ecore_ptt_invalidate(p_hwfn);
+                       val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                      PGLUE_B_REG_INIT_DONE_PTT_GTT);
+               } while ((val != 1) && --poll_cnt);
+
+               if (!poll_cnt)
+                       DP_ERR(p_hwfn,
+                              "PGLUE_B_REG_INIT_DONE didn't complete\n");
+       }
+#endif
+
+       /* Set the global windows */
+       gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+       for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
+               if (pxp_global_win[i])
+                       REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+                              pxp_global_win[i]);
+}
+
+/* Parse the firmware data blob: record pointers to the version info,
+ * init-ops array, init-values array and modes tree inside 'data', using
+ * the bin_buffer_hdr index at its start.  When CONFIG_ECORE_BINARY_FW is
+ * not set, the statically linked init_ops/init_val/modes_tree_buf tables
+ * are used instead and 'data' is ignored.  The pointers alias 'data', so
+ * the blob must outlive the device.
+ */
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+                                       const u8 *data)
+{
+       struct ecore_fw_data *fw = p_dev->fw_data;
+
+#ifdef CONFIG_ECORE_BINARY_FW
+       struct bin_buffer_hdr *buf_hdr;
+       u32 offset, len;
+
+       if (!data) {
+               DP_NOTICE(p_dev, true, "Invalid fw data\n");
+               return ECORE_INVAL;
+       }
+
+       buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
+
+       offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+       fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
+
+       offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+       fw->init_ops = (union init_op *)((uintptr_t)(data + offset));
+
+       offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+       fw->arr_data = (u32 *)((uintptr_t)(data + offset));
+
+       offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+       fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
+       len = buf_hdr[BIN_BUF_INIT_CMD].length;
+       fw->init_ops_size = len / sizeof(struct init_raw_op);
+#else
+       fw->init_ops = (union init_op *)init_ops;
+       fw->arr_data = (u32 *)init_val;
+       fw->modes_tree_buf = (u8 *)modes_tree_buf;
+       fw->init_ops_size = init_ops_size;
+#endif
+
+       return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
new file mode 100644 (file)
index 0000000..8a6fce4
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INIT_OPS__
+#define __ECORE_INIT_OPS__
+
+#include "ecore.h"
+
+/**
+ * @brief ecore_init_iro_array - init iro_arr.
+ *
+ *
+ * @param p_dev
+ */
+void ecore_init_iro_array(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   int phase, int phase_id, int modes);
+
+/**
+ * @brief ecore_init_alloc - Allocate the RT array and store 'values' ptrs.
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_free - Free the RT array allocated by ecore_init_alloc.
+ *
+ * @param p_hwfn
+ */
+void ecore_init_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val)                                \
+       ecore_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val)                    \
+       ecore_init_store_rt_reg(hwfn, offset, val)
+
+/**
+* @brief ecore_init_store_rt_agg - Store an array of configuration values
+*        in the RT array, starting at rt_offset.
+*
+* @param p_hwfn
+* @param rt_offset
+* @param val
+* @param size
+*/
+
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+                            u32 rt_offset, u32 *val, osal_size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val)                    \
+       ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+/**
+ * @brief
+ *      Initialize GTT global windows and set admin window
+ *      related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+#endif /* __ECORE_INIT_OPS__ */
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
new file mode 100644 (file)
index 0000000..73d7fb5
--- /dev/null
@@ -0,0 +1,1069 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_init_ops.h"
+#include "ecore_rt_defs.h"
+#include "ecore_int.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_hw_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_mcp.h"
+
+/* Per protocol-index registration: a completion callback and its argument */
+struct ecore_pi_info {
+       ecore_int_comp_cb_t comp_cb;
+       void *cookie;           /* Will be sent to the compl cb function */
+};
+
+/* Slowpath status block together with its protocol-index callbacks */
+struct ecore_sb_sp_info {
+       struct ecore_sb_info sb_info;
+       /* per protocol index data */
+       struct ecore_pi_info pi_info_arr[PIS_PER_SB];
+};
+
+enum ecore_attention_type {
+       ECORE_ATTN_TYPE_ATTN,
+       ECORE_ATTN_TYPE_PARITY,
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+struct aeu_invert_reg_bit {
+       char bit_name[30];
+
+#define ATTENTION_PARITY               (1 << 0)
+
+#define ATTENTION_LENGTH_MASK          (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT         (4)
+#define ATTENTION_LENGTH(flags)                (((flags) & ATTENTION_LENGTH_MASK) >> \
+                                        ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE               (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR                  (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT              ((2 << ATTENTION_LENGTH_SHIFT) | \
+                                        ATTENTION_PARITY)
+
+/* Multiple bits start with this offset */
+#define ATTENTION_OFFSET_MASK          (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT         (12)
+
+#define        ATTENTION_CLEAR_ENABLE          (1 << 28)
+#define        ATTENTION_FW_DUMP               (1 << 29)
+#define        ATTENTION_PANIC_DUMP            (1 << 30)
+       unsigned int flags;
+
+       /* Callback to call if attention will be triggered */
+       enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
+
+       enum block_id block_index;
+};
+
+struct aeu_invert_reg {
+       struct aeu_invert_reg_bit bits[32];
+};
+
+#define NUM_ATTN_REGS          (9)
+
+#define ATTN_STATE_BITS                (0xfff)
+#define ATTN_BITS_MASKABLE     (0x3ff)
+struct ecore_sb_attn_info {
+       /* Virtual & Physical address of the SB */
+       struct atten_status_block *sb_attn;
+       dma_addr_t sb_phys;
+
+       /* Last seen running index */
+       u16 index;
+
+       /* A mask of the AEU bits resulting in a parity error */
+       u32 parity_mask[NUM_ATTN_REGS];
+
+       /* A pointer to the attention description structure */
+       struct aeu_invert_reg *p_aeu_desc;
+
+       /* Previously asserted attentions, which are still unasserted */
+       u16 known_attn;
+
+       /* Cleanup address for the link's general hw attention */
+       u32 mfw_attn_addr;
+};
+
+/* Sample the attention SB's running index. Returns ECORE_SB_ATT_IDX when
+ * the index advanced since the last sample, 0 otherwise.
+ */
+static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
+                                struct ecore_sb_attn_info *p_sb_desc)
+{
+       u16 hw_index;
+       u16 ret = 0;
+
+       /* Barrier before sampling the running index */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       hw_index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
+       if (hw_index != p_sb_desc->index) {
+               p_sb_desc->index = hw_index;
+               ret = ECORE_SB_ATT_IDX;
+       }
+
+       /* Barrier after recording the new index */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       return ret;
+}
+
+/* Write an attention-segment ack for the given consumer to the IGU */
+static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
+                             void OSAL_IOMEM *igu_addr, u32 ack_cons)
+{
+       struct igu_prod_cons_update igu_ack = { 0 };
+       u32 flags;
+
+       flags = ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT;
+       flags |= 1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT;
+       flags |= IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT;
+       flags |= IGU_SEG_ACCESS_ATTN <<
+                IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT;
+       igu_ack.sb_id_and_flags = flags;
+
+       DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
+
+       /* Both segments (interrupts & acks) are written to same place address;
+        * Need to guarantee all commands will be received (in-order) by HW.
+        */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+       OSAL_BARRIER(p_hwfn->p_dev);
+}
+
+/**
+ * @brief ecore_int_sp_dpc - Slowpath deferred procedure call.
+ *
+ * @param hwfn_cookie - the ecore_hwfn the DPC was scheduled for, cast to
+ *        osal_int_ptr_t.
+ *
+ * Acknowledges the slowpath status block, samples the interrupt and
+ * attention running indices, invokes every registered protocol-index
+ * completion callback when the SB index advanced, acks pending
+ * attentions, and re-enables interrupt generation.
+ */
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
+{
+       struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
+       struct ecore_pi_info *pi_info = OSAL_NULL;
+       struct ecore_sb_attn_info *sb_attn;
+       struct ecore_sb_info *sb_info;
+       int arr_size;
+       u16 rc = 0;
+
+       /* Can't log through p_hwfn here - it is the very pointer that is
+        * missing (the original DP_ERR(p_hwfn->p_dev, ...) dereferenced NULL).
+        */
+       if (!p_hwfn)
+               return;
+
+       if (!p_hwfn->p_sp_sb) {
+               DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
+               return;
+       }
+
+       /* sb_info is the address of an embedded member and thus never NULL */
+       sb_info = &p_hwfn->p_sp_sb->sb_info;
+       arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+
+       if (!p_hwfn->p_sb_attn) {
+               DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
+               return;
+       }
+       sb_attn = p_hwfn->p_sb_attn;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+                  p_hwfn, p_hwfn->my_id);
+
+       /* Disable ack for def status block. Required both for msix +
+        * inta in non-mask mode, in inta does no harm.
+        */
+       ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+       /* Gather Interrupts/Attentions information */
+       if (!sb_info->sb_virt) {
+               DP_ERR(p_hwfn->p_dev,
+                      "Interrupt Status block is NULL -"
+                      " cannot check for new interrupts!\n");
+       } else {
+               u32 tmp_index = sb_info->sb_ack;
+               rc = ecore_sb_update_sb_idx(sb_info);
+               DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+                          "Interrupt indices: 0x%08x --> 0x%08x\n",
+                          tmp_index, sb_info->sb_ack);
+       }
+
+       if (!sb_attn->sb_attn) {
+               DP_ERR(p_hwfn->p_dev,
+                      "Attentions Status block is NULL -"
+                      " cannot check for new attentions!\n");
+       } else {
+               u16 tmp_index = sb_attn->index;
+
+               rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
+               DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+                          "Attention indices: 0x%08x --> 0x%08x\n",
+                          tmp_index, sb_attn->index);
+       }
+
+       /* Check if we expect interrupts at this time. if not just ack them */
+       if (!(rc & ECORE_SB_EVENT_MASK)) {
+               ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+               return;
+       }
+
+       /* Check the validity of the DPC ptt. If not ack interrupts and fail */
+       if (!p_hwfn->p_dpc_ptt) {
+               DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
+               ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+               return;
+       }
+
+       if (rc & ECORE_SB_IDX) {
+               int pi;
+
+               /* Since we only looked at the SB index, it's possible more
+                * than a single protocol-index on the SB incremented.
+                * Iterate over all configured protocol indices and check
+                * whether something happened for each.
+                */
+               for (pi = 0; pi < arr_size; pi++) {
+                       pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+                       if (pi_info->comp_cb != OSAL_NULL)
+                               pi_info->comp_cb(p_hwfn, pi_info->cookie);
+               }
+       }
+
+       if (rc & ECORE_SB_ATT_IDX) {
+               /* This should be done before the interrupts are enabled,
+                * since otherwise a new attention will be generated.
+                */
+               ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+       }
+
+       ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+/* Release the attention SB: the DMA-coherent ring first, then the
+ * tracking structure itself.
+ */
+static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+       if (p_sb == OSAL_NULL)
+               return;
+
+       if (p_sb->sb_attn != OSAL_NULL)
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
+                                      p_sb->sb_phys,
+                                      SB_ATTN_ALIGNED_SIZE(p_hwfn));
+
+       OSAL_FREE(p_hwfn->p_dev, p_sb);
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#ifdef RTE_LIBRTE_QEDE_RX_COAL_US
+#define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
+#else
+#define ECORE_CAU_DEF_RX_USECS 24
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_TX_COAL_US
+#define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
+#else
+#define ECORE_CAU_DEF_TX_USECS 48
+#endif
+
+/**
+ * @brief ecore_init_cau_sb_entry - Fill a CAU SB entry for a PF or VF.
+ *
+ * @param p_hwfn
+ * @param p_sb_entry - entry to fill (zeroed first)
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid - non-zero when the entry belongs to a VF
+ *
+ * Also applies the default rx/tx coalescing values to the device when
+ * coalescing is enabled and none were configured yet.
+ */
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+                            struct cau_sb_entry *p_sb_entry,
+                            u8 pf_id, u16 vf_number, u8 vf_valid)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       u32 cau_state;
+
+       OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+       /* Set the timer resolution to the fixed defaults */
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+                 ECORE_CAU_DEF_RX_TIMER_RES);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+                 ECORE_CAU_DEF_TX_TIMER_RES);
+
+       cau_state = CAU_HC_DISABLE_STATE;
+
+       if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+               cau_state = CAU_HC_ENABLE_STATE;
+               /* Fall back to the compile-time defaults when unset */
+               if (!p_dev->rx_coalesce_usecs) {
+                       p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
+                       DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
+                               p_dev->rx_coalesce_usecs);
+               }
+               if (!p_dev->tx_coalesce_usecs) {
+                       p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
+                       DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
+                               p_dev->tx_coalesce_usecs);
+               }
+       }
+
+       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+/**
+ * @brief ecore_int_cau_conf_sb - Configure the CAU for a status block.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_phys - DMA address of the status block
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ *
+ * Writes the SB address and the CAU entry either directly via DMAE (when
+ * HW init is already done) or into the runtime-init array for the init
+ * tool to apply later.  Also programs per-PI coalescing when enabled.
+ */
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          dma_addr_t sb_phys, u16 igu_sb_id,
+                          u16 vf_number, u8 vf_valid)
+{
+       struct cau_sb_entry sb_entry;
+
+       ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+                               vf_number, vf_valid);
+
+       if (p_hwfn->hw_init_done) {
+               /* Wide-bus, initialize via DMAE */
+               u64 phys_addr = (u64)sb_phys;
+
+               /* Both writes transfer 2 dwords (one u64 entry each) */
+               ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                   (u64)(osal_uintptr_t)&phys_addr,
+                                   CAU_REG_SB_ADDR_MEMORY +
+                                   igu_sb_id * sizeof(u64), 2, 0);
+               ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                   (u64)(osal_uintptr_t)&sb_entry,
+                                   CAU_REG_SB_VAR_MEMORY +
+                                   igu_sb_id * sizeof(u64), 2, 0);
+       } else {
+               /* Initialize Status Block Address */
+               STORE_RT_REG_AGG(p_hwfn,
+                                CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+                                igu_sb_id * 2, sb_phys);
+
+               STORE_RT_REG_AGG(p_hwfn,
+                                CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+                                igu_sb_id * 2, sb_entry);
+       }
+
+       /* Configure pi coalescing if set */
+       if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+               u8 num_tc = 1;  /* @@@TBD aelior ECORE_MULTI_COS */
+               /* timeset = usecs >> (timer_res + 1); see coalescing
+                * formula above ecore_init_cau_sb_entry.
+                */
+               u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
+                   (ECORE_CAU_DEF_RX_TIMER_RES + 1);
+               u8 i;
+
+               ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+                                     ECORE_COAL_RX_STATE_MACHINE, timeset);
+
+               timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
+                   (ECORE_CAU_DEF_TX_TIMER_RES + 1);
+
+               for (i = 0; i < num_tc; i++) {
+                       ecore_int_cau_conf_pi(p_hwfn, p_ptt,
+                                             igu_sb_id, TX_PI(i),
+                                             ECORE_COAL_TX_STATE_MACHINE,
+                                             timeset);
+               }
+       }
+}
+
+/**
+ * @brief ecore_int_cau_conf_pi - Configure a single CAU protocol-index
+ *        entry (timeset + rx/tx state-machine selection).
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index - protocol index within the SB
+ * @param coalescing_fsm - rx or tx coalescing state machine
+ * @param timeset
+ */
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          u16 igu_sb_id, u32 pi_index,
+                          enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
+{
+       struct cau_pi_entry pi_entry;
+       u32 sb_offset, pi_offset;
+
+       sb_offset = igu_sb_id * PIS_PER_SB;
+       OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+       SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+       /* FSM_SEL: 0 selects the rx machine, 1 the tx machine */
+       if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
+               SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+       else
+               SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+       pi_offset = sb_offset + pi_index;
+       /* Write the entry as a raw dword - directly post-init, via the
+        * RT array otherwise.
+        */
+       if (p_hwfn->hw_init_done) {
+               ecore_wr(p_hwfn, p_ptt,
+                        CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+                        *((u32 *)&(pi_entry)));
+       } else {
+               STORE_RT_REG(p_hwfn,
+                            CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+                            *((u32 *)&(pi_entry)));
+       }
+}
+
+/**
+ * @brief ecore_int_sb_setup - Reset a status block and configure its CAU
+ *        entry as a PF SB (vf_number/vf_valid = 0).
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info
+ */
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
+{
+       /* zero status block and ack counter */
+       sb_info->sb_ack = 0;
+       OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+       ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+                                     sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief ecore_get_igu_sb_id - map a driver-relative sb_id to the
+ *        corresponding IGU SB index.
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+       u16 igu_sb_id;
+
+       /* Assuming continuous set of IGU SBs dedicated for given PF */
+       if (sb_id == ECORE_SP_SB_ID) {
+               igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+               DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                          "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+       } else {
+               igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+               DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                          "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
+       }
+
+       return igu_sb_id;
+}
+
+/**
+ * @brief ecore_int_sb_init - Initialize a status block info structure and
+ *        register it with the hw-function (non-slowpath SBs only).
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info - caller-allocated structure to fill
+ * @param sb_virt_addr - virtual address of the SB memory
+ * @param sb_phy_addr - DMA address of the SB memory
+ * @param sb_id - driver-relative SB id, or ECORE_SP_SB_ID
+ *
+ * @return ECORE_SUCCESS always.
+ */
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_sb_info *sb_info,
+                                      void *sb_virt_addr,
+                                      dma_addr_t sb_phy_addr, u16 sb_id)
+{
+       sb_info->sb_virt = sb_virt_addr;
+       sb_info->sb_phys = sb_phy_addr;
+
+       sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
+
+       /* The slowpath SB is tracked via p_sp_sb, not sbs_info[] */
+       if (sb_id != ECORE_SP_SB_ID) {
+               p_hwfn->sbs_info[sb_id] = sb_info;
+               p_hwfn->num_sbs++;
+       }
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+       sb_info->p_hwfn = p_hwfn;
+#endif
+       sb_info->p_dev = p_hwfn->p_dev;
+
+       /* The igu address will hold the absolute address that needs to be
+        * written to for a specific status block
+        */
+       sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
+                   GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
+
+       sb_info->flags |= ECORE_SB_INFO_INIT;
+
+       ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+       return ECORE_SUCCESS;
+}
+
+/**
+ * @brief ecore_int_sb_release - Unregister and reset a non-slowpath
+ *        status block.  The caller owns (and frees) the SB memory itself.
+ *
+ * @param p_hwfn
+ * @param sb_info
+ * @param sb_id - must not be ECORE_SP_SB_ID
+ *
+ * @return ECORE_SUCCESS, or ECORE_INVAL for the slowpath SB.
+ */
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_sb_info *sb_info,
+                                         u16 sb_id)
+{
+       /* The slowpath SB has its own teardown path */
+       if (sb_id == ECORE_SP_SB_ID) {
+               DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+               return ECORE_INVAL;
+       }
+
+       /* zero status block and ack counter */
+       sb_info->sb_ack = 0;
+       OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+       if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
+               p_hwfn->sbs_info[sb_id] = OSAL_NULL;
+               p_hwfn->num_sbs--;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Release the slowpath SB: its DMA-coherent ring first, then the
+ * tracking structure.
+ */
+static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+       if (p_sb == OSAL_NULL)
+               return;
+
+       if (p_sb->sb_info.sb_virt != OSAL_NULL)
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_sb->sb_info.sb_virt,
+                                      p_sb->sb_info.sb_phys,
+                                      SB_ALIGNED_SIZE(p_hwfn));
+
+       OSAL_FREE(p_hwfn->p_dev, p_sb);
+}
+
+/**
+ * @brief ecore_int_sp_sb_alloc - Allocate and initialize the slowpath
+ *        status block (tracking struct + DMA-coherent SB ring).
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS or ECORE_NOMEM.
+ */
+static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt)
+{
+       struct ecore_sb_sp_info *p_sb;
+       dma_addr_t p_phys = 0;
+       void *p_virt;
+
+       /* SB struct */
+       p_sb =
+           OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                      sizeof(struct ecore_sb_sp_info));
+       if (!p_sb) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_sb_info'");
+               return ECORE_NOMEM;
+       }
+
+       /* SB ring  */
+       p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                        &p_phys, SB_ALIGNED_SIZE(p_hwfn));
+       if (!p_virt) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
+               /* Undo the struct allocation on failure */
+               OSAL_FREE(p_hwfn->p_dev, p_sb);
+               return ECORE_NOMEM;
+       }
+
+       /* Status Block setup */
+       p_hwfn->p_sp_sb = p_sb;
+       ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
+                         p_virt, p_phys, ECORE_SP_SB_ID);
+
+       /* No protocol-index callbacks registered yet */
+       OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+       return ECORE_SUCCESS;
+}
+
+/* Register a completion callback on the first free protocol-index slot
+ * of the slowpath SB.  On success *sb_idx receives the slot and
+ * *p_fw_cons the firmware consumer pointer for it.
+ */
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+                                          ecore_int_comp_cb_t comp_cb,
+                                          void *cookie,
+                                          u8 *sb_idx, __le16 **p_fw_cons)
+{
+       struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+       u8 slot;
+
+       for (slot = 0; slot < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); slot++) {
+               if (p_sp_sb->pi_info_arr[slot].comp_cb != OSAL_NULL)
+                       continue;
+
+               p_sp_sb->pi_info_arr[slot].comp_cb = comp_cb;
+               p_sp_sb->pi_info_arr[slot].cookie = cookie;
+               *sb_idx = slot;
+               *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[slot];
+               return ECORE_SUCCESS;
+       }
+
+       /* All slots taken */
+       return ECORE_NOMEM;
+}
+
+/* Clear the callback registered on protocol-index slot pi; returns
+ * ECORE_NOMEM when the slot was already empty.
+ */
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
+{
+       struct ecore_pi_info *p_pi = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+
+       if (p_pi->comp_cb == OSAL_NULL)
+               return ECORE_NOMEM;
+
+       p_pi->comp_cb = OSAL_NULL;
+       p_pi->cookie = OSAL_NULL;
+       return ECORE_SUCCESS;
+}
+
+/* Return the IGU SB id of the slowpath status block */
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
+{
+       return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+/**
+ * @brief ecore_int_igu_enable_int - Program the IGU PF configuration for
+ *        the requested interrupt mode and enable the function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - INTA / MSI / MSIX / POLL
+ */
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             enum ecore_int_mode int_mode)
+{
+       u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+               DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
+       else
+#endif
+               igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;
+
+       p_hwfn->p_dev->int_mode = int_mode;
+       switch (p_hwfn->p_dev->int_mode) {
+       case ECORE_INT_MODE_INTA:
+               igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+               igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+               break;
+
+       case ECORE_INT_MODE_MSI:
+               igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+               igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+               break;
+
+       case ECORE_INT_MODE_MSIX:
+               igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+               break;
+       case ECORE_INT_MODE_POLL:
+               /* no line/MSI enable bits in polling mode */
+               break;
+       }
+
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+/**
+ * @brief ecore_int_igu_enable_attn - Enable attention generation by
+ *        latching the AEU edges and unmasking AEU signals toward the IGU.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt)
+{
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               DP_INFO(p_hwfn,
+                       "FPGA - Don't enable Attentions in IGU and MISC\n");
+               return;
+       }
+#endif
+
+       /* Configure AEU signal change to produce attentions */
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
+
+       /* Flush the above before unmasking */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       /* Unmask AEU signals toward IGU */
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+/**
+ * @brief ecore_int_igu_enable - Enable attentions and interrupt
+ *        generation, requesting the slowpath IRQ where needed.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ *
+ * @return ECORE_SUCCESS, or ECORE_NORESOURCES when the slowpath IRQ
+ *         request fails.
+ */
+enum _ecore_status_t
+ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                    enum ecore_int_mode int_mode)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 tmp;
+
+       /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
+       tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
+       tmp |= 0xf;
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
+
+       /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
+        * attentions. Since we're waiting for BRCM answer regarding this
+        * attention, in the meanwhile we simply mask it.
+        */
+       tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
+       tmp &= ~0x800;
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
+
+       ecore_int_igu_enable_attn(p_hwfn, p_ptt);
+
+       if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+               rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
+               if (rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Slowpath IRQ request failed\n");
+                       return ECORE_NORESOURCES;
+               }
+               p_hwfn->b_int_requested = true;
+       }
+
+       /* Enable interrupt Generation */
+       ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+       p_hwfn->b_int_enabled = 1;
+
+       return rc;
+}
+
+/* Disable interrupt generation: clear the internal flag and zero the IGU
+ * PF configuration register.
+ */
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt)
+{
+       p_hwfn->b_int_enabled = 0;
+
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH               (1000)
+/**
+ * @brief ecore_int_igu_cleanup_sb - Set or clear an SB's cleanup bit via
+ *        the IGU command-control register and poll for completion.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - IGU SB id to clean
+ * @param cleanup_set - true to set the cleanup bit, false to clear it
+ * @param opaque_fid - function id placed in the command's FID field
+ */
+void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             u32 sb_id, bool cleanup_set, u16 opaque_fid)
+{
+       u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
+       u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+       u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+       u8 type = 0;            /* FIXME MichalS type??? */
+
+       OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
+                          IGU_REG_CLEANUP_STATUS_0) != 0x200);
+
+       /* USE Control Command Register to perform cleanup. There is an
+        * option to do this using IGU bar, but then it can't be used for VFs.
+        */
+
+       /* Set the data field */
+       SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+       SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
+       SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+       /* Set the control register */
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+       /* Data must land before the control write triggers the command */
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+       OSAL_BARRIER(p_hwfn->p_dev);
+
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       /* calculate where to read the status bit from */
+       sb_bit = 1 << (sb_id % 32);
+       sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+       sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
+
+       /* Now wait for the command to complete */
+       while (--sleep_cnt) {
+               val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
+               if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+                       break;
+               OSAL_MSLEEP(5);
+       }
+
+       if (!sleep_cnt)
+               DP_NOTICE(p_hwfn, true,
+                         "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+                         val, sb_id);
+}
+
+/**
+ * @brief ecore_int_igu_init_pure_rt_single - Run the cleanup flow for a
+ *        single SB (optionally set, always clear) and zero its CAU PIs.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - IGU SB id
+ * @param opaque - opaque fid for the cleanup command
+ * @param b_set - also perform the "set" half of the cleanup
+ */
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      u32 sb_id, u16 opaque, bool b_set)
+{
+       int pi;
+
+       /* Set */
+       if (b_set)
+               ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+       /* Clear */
+       ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+       /* Clear the CAU for the SB
+        * NOTE(review): 12 looks like the per-SB PI count (cf. PIS_PER_SB)
+        * - confirm and replace the magic number if so.
+        */
+       for (pi = 0; pi < 12; pi++)
+               ecore_wr(p_hwfn, p_ptt,
+                        CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
+
+/**
+ * @brief ecore_int_igu_init_pure_rt - Run the cleanup flow over all of
+ *        this PF's IGU SBs, and optionally the slowpath SB as well.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_set - also perform the "set" half of each cleanup
+ * @param b_slowpath - include the default (slowpath) SB
+ */
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt,
+                               bool b_set, bool b_slowpath)
+{
+       u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+       u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+       u32 sb_id = 0, val = 0;
+
+       /* @@@TBD MichalK temporary... should be moved to init-tool... */
+       val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+       val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+       val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+       /* end temporary */
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                  "IGU cleaning SBs [%d,...,%d]\n",
+                  igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+       for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+               ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+                                                 p_hwfn->hw_info.opaque_fid,
+                                                 b_set);
+
+       if (!b_slowpath)
+               return;
+
+       /* Finally, the default status block */
+       sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                  "IGU cleaning slowpath SB [%d]\n", sb_id);
+       ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+                                         p_hwfn->hw_info.opaque_fid, b_set);
+}
+
+/* Read one IGU CAM line and, when it is a valid PF mapping, record it in
+ * the corresponding igu_blocks[] entry.  Returns the raw CAM value.
+ */
+static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, u16 sb_id)
+{
+       struct ecore_igu_block *p_block;
+       u32 val;
+
+       val = ecore_rd(p_hwfn, p_ptt,
+                      IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+       p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+
+       /* stop scanning when hit first invalid PF entry */
+       if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+           GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+               return val;
+
+       /* Fill the block information */
+       p_block->status = ECORE_IGU_STATUS_VALID;
+       p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
+       p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+       p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                  "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
+                  " is_pf = %d vector_num = 0x%x\n",
+                  sb_id, val, p_block->function_id, p_block->is_pf,
+                  p_block->vector_number);
+
+       return val;
+}
+
+/* Walk the IGU CAM during HW prepare: build the driver's shadow map of
+ * status-block lines and derive the default/base SB indices for this hwfn.
+ * Returns ECORE_NOMEM on allocation failure, ECORE_INVAL if the CAM did not
+ * yield a usable default and base SB, ECORE_SUCCESS otherwise.
+ */
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt)
+{
+       struct ecore_igu_info *p_igu_info;
+       struct ecore_igu_block *p_block;
+       u16 sb_id, last_iov_sb_id = 0;
+       u32 min_vf, max_vf, val;
+       u16 prev_sb_id = 0xFF;
+
+       /* sizeof(*p_igu_info) is a compile-time quantity, so using the
+        * not-yet-assigned pointer in it is safe.
+        */
+       p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
+                                               GFP_KERNEL,
+                                               sizeof(*p_igu_info));
+       if (!p_hwfn->hw_info.p_igu_info)
+               return ECORE_NOMEM;
+
+       OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
+
+       p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+       /* Initialize base sb / sb cnt for PFs and VFs */
+       p_igu_info->igu_base_sb = 0xffff;
+       p_igu_info->igu_sb_cnt = 0;
+       p_igu_info->igu_dsb_id = 0xffff;
+       p_igu_info->igu_base_sb_iov = 0xffff;
+
+       /* NOTE(review): with min_vf == max_vf == 0 the VF-range test below
+        * can never match, so no CAM lines are claimed for VFs here -
+        * presumably until SR-IOV support fills in a real VF range.
+        */
+       min_vf = 0;
+       max_vf = 0;
+
+       for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+            sb_id++) {
+               p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+               val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
+               /* Stop on the first line that is neither valid nor PF-owned */
+               if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+                   GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+                       break;
+
+               if (p_block->is_pf) {
+                       if (p_block->function_id == p_hwfn->rel_pf_id) {
+                               p_block->status |= ECORE_IGU_STATUS_PF;
+
+                               /* Vector 0 of our PF is the default SB */
+                               if (p_block->vector_number == 0) {
+                                       if (p_igu_info->igu_dsb_id == 0xffff)
+                                               p_igu_info->igu_dsb_id = sb_id;
+                               } else {
+                                       /* Non-default vectors must form one
+                                        * contiguous range of CAM lines.
+                                        */
+                                       if (p_igu_info->igu_base_sb == 0xffff) {
+                                               p_igu_info->igu_base_sb = sb_id;
+                                       } else if (prev_sb_id != sb_id - 1) {
+                                               DP_NOTICE(p_hwfn->p_dev, false,
+                                                         "consecutive igu"
+                                                         " vectors for HWFN"
+                                                         " %x broken",
+                                                         p_hwfn->rel_pf_id);
+                                               break;
+                                       }
+                                       prev_sb_id = sb_id;
+                                       /* we don't count the default */
+                                       (p_igu_info->igu_sb_cnt)++;
+                               }
+                       }
+               } else {
+                       if ((p_block->function_id >= min_vf) &&
+                           (p_block->function_id < max_vf)) {
+                               /* Available for VFs of this PF */
+                               if (p_igu_info->igu_base_sb_iov == 0xffff) {
+                                       p_igu_info->igu_base_sb_iov = sb_id;
+                               } else if (last_iov_sb_id != sb_id - 1) {
+                                       if (!val)
+                                               DP_VERBOSE(p_hwfn->p_dev,
+                                                          ECORE_MSG_INTR,
+                                                          "First uninited IGU"
+                                                          " CAM entry at"
+                                                          " index 0x%04x\n",
+                                                          sb_id);
+                                       else
+                                               DP_NOTICE(p_hwfn->p_dev, false,
+                                                         "Consecutive igu"
+                                                         " vectors for HWFN"
+                                                         " %x vfs is broken"
+                                                         " [jumps from %04x"
+                                                         " to %04x]\n",
+                                                         p_hwfn->rel_pf_id,
+                                                         last_iov_sb_id,
+                                                         sb_id);
+                                       break;
+                               }
+                               /* Line may be handed out to a VF later */
+                               p_block->status |= ECORE_IGU_STATUS_FREE;
+                               p_hwfn->hw_info.p_igu_info->free_blks++;
+                               last_iov_sb_id = sb_id;
+                       }
+               }
+       }
+       p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                  "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
+                  "igu_dsb_id=0x%x\n",
+                  p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
+                  p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
+                  p_igu_info->igu_dsb_id);
+
+       /* The scan must have produced a default SB plus at least one other
+        * PF status block to be usable.
+        */
+       if (p_igu_info->igu_base_sb == 0xffff ||
+           p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
+               DP_NOTICE(p_hwfn, true,
+                         "IGU CAM returned invalid values igu_base_sb=0x%x "
+                         "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+                         p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
+                         p_igu_info->igu_dsb_id);
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
+{
+       /* Only the function-enable bit is set at init time */
+       STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET,
+                    IGU_PF_CONF_FUNC_EN);
+}
+
+#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
+                         IGU_CMD_INT_ACK_BASE)
+#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
+                         IGU_CMD_INT_ACK_BASE)
+/* Read the 64-bit SISR (single ISR / multiple DPC) status from the IGU
+ * command area, assembled from its two 32-bit halves (low half read first,
+ * matching the register layout above).
+ */
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
+{
+       u64 lo, hi;
+
+       lo = REG_RD(p_hwfn,
+                   GTT_BAR0_MAP_REG_IGU_CMD + LSB_IGU_CMD_ADDR * 8);
+       hi = REG_RD(p_hwfn,
+                   GTT_BAR0_MAP_REG_IGU_CMD + MSB_IGU_CMD_ADDR * 8);
+
+       return (hi << 32) | lo;
+}
+
+/* Bind the slowpath DPC object to this hwfn and mark it as enabled so the
+ * ISR path may schedule it.
+ */
+static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
+{
+       OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
+       p_hwfn->b_sp_dpc_enabled = true;
+}
+
+/* Allocate the OSAL DPC object used to service the slowpath status block */
+static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
+{
+       p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
+
+       return p_hwfn->sp_dpc ? ECORE_SUCCESS : ECORE_NOMEM;
+}
+
+/* Release the slowpath DPC object allocated by ecore_int_sp_dpc_alloc() */
+static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
+{
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
+}
+
+/* Allocate the interrupt-path resources of a hwfn: the slowpath DPC object
+ * and the slowpath status block.  Returns the status of the first failing
+ * allocation, or ECORE_SUCCESS.
+ */
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt)
+{
+       enum _ecore_status_t rc;
+
+       rc = ecore_int_sp_dpc_alloc(p_hwfn);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
+               return rc;
+       }
+
+       rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
+
+       return rc;
+}
+
+/* Teardown counterpart of ecore_int_alloc().  Also frees the attention SB
+ * state - NOTE(review): its allocation is not done in ecore_int_alloc(),
+ * presumably it happens elsewhere in the init flow.
+ */
+void ecore_int_free(struct ecore_hwfn *p_hwfn)
+{
+       ecore_int_sp_sb_free(p_hwfn);
+       ecore_int_sb_attn_free(p_hwfn);
+       ecore_int_sp_dpc_free(p_hwfn);
+}
+
+/* Run-time setup of the slowpath interrupt path: program the slowpath SB
+ * and arm the DPC.  Silently bails out unless both the slowpath SB and the
+ * attention SB have already been allocated.
+ */
+void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
+               return;
+
+       ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+       ecore_int_sp_dpc_setup(p_hwfn);
+}
+
+/* Report the status-block counts derived from the IGU CAM scan.  A no-op
+ * when either the IGU info or the output structure is missing.
+ */
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+                          struct ecore_sb_cnt_info *p_sb_cnt_info)
+{
+       struct ecore_igu_info *p_igu = p_hwfn->hw_info.p_igu_info;
+
+       if (p_igu == OSAL_NULL || p_sb_cnt_info == OSAL_NULL)
+               return;
+
+       p_sb_cnt_info->sb_cnt = p_igu->igu_sb_cnt;
+       p_sb_cnt_info->sb_iov_cnt = p_igu->igu_sb_cnt_iov;
+       p_sb_cnt_info->sb_free_blk = p_igu->free_blks;
+}
+
+/* Translate an absolute IGU SB index into a zero-based queue index.  PF SBs
+ * map directly; VF SBs are stacked after the PF queues.  Out-of-range ids
+ * are reported and mapped to queue 0.
+ */
+u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+       struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+       u16 pf_base = p_info->igu_base_sb;
+       u16 iov_base = p_info->igu_base_sb_iov;
+
+       if (sb_id >= pf_base && sb_id < pf_base + p_info->igu_sb_cnt)
+               return sb_id - pf_base;
+
+       if (sb_id >= iov_base && sb_id < iov_base + p_info->igu_sb_cnt_iov)
+               return sb_id - iov_base + p_info->igu_sb_cnt;
+
+       DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
+                 sb_id);
+       return 0;
+}
+
+/* Clear the interrupt-requested flag on every hwfn of the device; to be
+ * called after all slowpath IRQs have been released.
+ */
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
+{
+       int i;
+
+       for_each_hwfn(p_dev, i)
+               p_dev->hwfns[i].b_int_requested = false;
+}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
new file mode 100644 (file)
index 0000000..17c9521
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INT_H__
+#define __ECORE_INT_H__
+
+#include "ecore.h"
+#include "ecore_int_api.h"
+
+#define ECORE_CAU_DEF_RX_TIMER_RES 0
+#define ECORE_CAU_DEF_TX_TIMER_RES 0
+
+#define ECORE_SB_ATT_IDX       0x0001
+#define ECORE_SB_EVENT_MASK    0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn)                                        \
+       ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+/* Shadow of a single IGU CAM line, filled by ecore_int_igu_read_cam() */
+struct ecore_igu_block {
+       u8 status;              /* ECORE_IGU_STATUS_* flags */
+#define ECORE_IGU_STATUS_FREE  0x01
+#define ECORE_IGU_STATUS_VALID 0x02
+#define ECORE_IGU_STATUS_PF    0x04
+
+       u8 vector_number;       /* vector within the owning function */
+       u8 function_id;         /* owning function number from the CAM */
+       u8 is_pf;               /* non-zero when the line is PF-owned */
+};
+
+/* Full shadow of the IGU CAM - one entry per possible SB on the path */
+struct ecore_igu_map {
+       struct ecore_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+/* Summary of the hwfn's IGU layout, derived by ecore_int_igu_read_cam() */
+struct ecore_igu_info {
+       struct ecore_igu_map igu_map;
+       u16 igu_dsb_id;         /* CAM index of the default (slowpath) SB */
+       u16 igu_base_sb;        /* first non-default PF status block */
+       u16 igu_base_sb_iov;    /* first status block available for VFs */
+       u16 igu_sb_cnt;         /* number of PF SBs, default excluded */
+       u16 igu_sb_cnt_iov;     /* number of SBs available for VFs */
+       u16 free_blks;          /* CAM lines flagged ECORE_IGU_STATUS_FREE */
+};
+
+/* TODO Names of function may change... */
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt,
+                               bool b_set, bool b_slowpath);
+
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_int_igu_read_cam - Reads the IGU CAM.
+ *     This function needs to be called during hardware
+ *     prepare. It reads the info from igu cam to know which
+ *     status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt);
+
+typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn,
+                                                    void *cookie);
+/**
+ * @brief ecore_int_register_cb - Register callback func for
+ *      slowhwfn statusblock.
+ *
+ *     Every protocol that uses the slowhwfn status block
+ *     should register a callback function that will be called
+ *     once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ *                  interrupt on the sp sb
+ *
+ * @param cookie  - passed to the callback function
+ * @param sb_idx  - OUT parameter which gives the chosen index
+ *                  for this protocol.
+ * @param p_fw_cons  - pointer to the actual address of the
+ *                     consumer for this protocol.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+                                          ecore_int_comp_cb_t comp_cb,
+                                          void *cookie,
+                                          u8 *sb_idx, __le16 **p_fw_cons);
+/**
+ * @brief ecore_int_unregister_cb - Unregisters callback
+ *      function from sp sb.
+ *      Partner of ecore_int_register_cb -> should be called
+ *      when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi);
+
+/**
+ * @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ *        block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id                - igu status block id
+ * @param cleanup_set  - set(1) / clear(0)
+ * @param opaque_fid    - the function for which to perform
+ *                     cleanup, for example a PF on behalf of
+ *                     its VFs.
+ */
+void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             u32 sb_id, bool cleanup_set, u16 opaque_fid);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ *        block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id                - igu status block id
+ * @param opaque       - opaque fid of the sb owner.
+ * @param cleanup_set  - set(1) / clear(0)
+ */
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      u32 sb_id, u16 opaque, bool b_set);
+
+/**
+ * @brief ecore_int_cau_conf - configure cau for a given status
+ *        block
+ *
+ * @param p_hwfn
+ * @param ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          dma_addr_t sb_phys,
+                          u16 igu_sb_id, u16 vf_number, u8 vf_valid);
+
+/**
+* @brief ecore_int_alloc
+*
+* @param p_hwfn
+ * @param p_ptt
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt);
+
+/**
+* @brief ecore_int_free
+*
+* @param p_hwfn
+*/
+void ecore_int_free(struct ecore_hwfn *p_hwfn);
+
+/**
+* @brief ecore_int_setup
+*
+* @param p_hwfn
+* @param p_ptt
+*/
+void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Returns an Rx queue index appropriate for usage with given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_enable(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         enum ecore_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+                            struct cau_sb_entry *p_sb_entry, u8 pf_id,
+                            u16 vf_number, u8 vf_valid);
+
+#ifndef ASIC_ONLY
+#define ECORE_MAPPING_MEMORY_SIZE(dev) \
+       ((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \
+        136 : NUM_OF_SBS(dev))
+#else
+#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
+#endif
+
+#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
new file mode 100644 (file)
index 0000000..f6db807
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INT_API_H__
+#define __ECORE_INT_API_H__
+
+#ifndef __EXTRACT__LINUX__
+#define ECORE_SB_IDX           0x0002
+
+#define RX_PI          0
+#define TX_PI(tc)      (RX_PI + 1 + tc)
+
+#ifndef ECORE_INT_MODE
+#define ECORE_INT_MODE
+enum ecore_int_mode {
+       ECORE_INT_MODE_INTA,
+       ECORE_INT_MODE_MSIX,
+       ECORE_INT_MODE_MSI,
+       ECORE_INT_MODE_POLL,
+};
+#endif
+
+struct ecore_sb_info {
+       struct status_block *sb_virt;
+       dma_addr_t sb_phys;
+       u32 sb_ack;             /* Last given ack */
+       u16 igu_sb_id;
+       void OSAL_IOMEM *igu_addr;
+       u8 flags;
+#define ECORE_SB_INFO_INIT     0x1
+#define ECORE_SB_INFO_SETUP    0x2
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+       struct ecore_hwfn *p_hwfn;
+#endif
+       struct ecore_dev *p_dev;
+};
+
+struct ecore_sb_cnt_info {
+       int sb_cnt;
+       int sb_iov_cnt;
+       int sb_free_blk;
+};
+
+/* Sample the chip-written producer index of the status block and record it
+ * as acknowledged.  Returns ECORE_SB_IDX if the producer advanced since the
+ * last call, 0 otherwise.
+ */
+static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
+{
+       u32 prod = 0;
+       u16 rc = 0;
+
+       /* barrier(); status block is written to by the chip */
+       /* FIXME: need some sort of barrier. */
+       prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
+           STATUS_BLOCK_PROD_INDEX_MASK;
+       if (sb_info->sb_ack != prod) {
+               sb_info->sb_ack = prod;
+               rc |= ECORE_SB_IDX;
+       }
+
+       OSAL_MMIOWB(sb_info->p_dev);
+       return rc;
+}
+
+/**
+ *
+ * @brief This function creates an update command for interrupts that is
+ *        written to the IGU.
+ *
+ * @param sb_info      - This is the structure allocated and
+ *        initialized per status block. Assumption is
+ *        that it was initialized using ecore_sb_init
+ * @param int_cmd      - Enable/Disable/Nop
+ * @param upd_flg      - whether igu consumer should be
+ *        updated.
+ *
+ * @return OSAL_INLINE void
+ */
+static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
+                                    enum igu_int_cmd int_cmd, u8 upd_flg)
+{
+       struct igu_prod_cons_update igu_ack = { 0 };
+
+       /* Pack the acked SB index, the consumer-update flag, the interrupt
+        * enable command and the register-access segment into one word.
+        */
+       igu_ack.sb_id_and_flags =
+           ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+            (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+            (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+            (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+       DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
+                     igu_ack.sb_id_and_flags);
+#else
+       DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
+#endif
+       /* Both segments (interrupts & acks) are written to same place address;
+        * Need to guarantee all commands will be received (in-order) by HW.
+        */
+       OSAL_MMIOWB(sb_info->p_dev);
+       OSAL_BARRIER(sb_info->p_dev);
+}
+
+/* Copy 'size' bytes from 'data' into device internal RAM at 'addr', one
+ * 32-bit register write at a time.  Any tail remainder smaller than a u32
+ * is not written.  The p_hwfn argument is only typed when direct-hwfn
+ * register access is configured.
+ */
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
+                                         void OSAL_IOMEM *addr,
+                                         int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
+                                         void OSAL_IOMEM *addr,
+                                         int size, u32 *data)
+#endif
+{
+       unsigned int i;
+
+       for (i = 0; i < size / sizeof(*data); i++)
+               DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
+}
+
+/* Public wrapper around __internal_ram_wr(); the hwfn parameter only exists
+ * when direct-hwfn register access is configured.
+ */
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
+                                       void OSAL_IOMEM *addr,
+                                       int size, u32 *data)
+{
+       __internal_ram_wr(p_hwfn, addr, size, data);
+}
+#else
+static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
+                                       int size, u32 *data)
+{
+       __internal_ram_wr(OSAL_NULL, addr, size, data);
+}
+#endif
+#endif
+
+struct ecore_hwfn;
+struct ecore_ptt;
+
+enum ecore_coalescing_fsm {
+       ECORE_COAL_RX_STATE_MACHINE,
+       ECORE_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief ecore_int_cau_conf_pi - configure cau for a given
+ *        status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param state
+ * @param timeset
+ */
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          u16 igu_sb_id,
+                          u32 pi_index,
+                          enum ecore_coalescing_fsm coalescing_fsm,
+                          u8 timeset);
+
+/**
+ *
+ * @brief ecore_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             enum ecore_int_mode int_mode);
+
+/**
+ *
+ * @brief ecore_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt);
+
+/**
+ *
+ * @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ *        register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
+
+#define ECORE_SP_SB_ID 0xffff
+/**
+ * @brief ecore_int_sb_init - Initializes the sb_info structure.
+ *
+ * once the structure is initialized it can be passed to sb related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      points to an uninitialized (but
+ *                     allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id                the sb_id to be used (zero based in driver)
+ *                     should use ECORE_SP_SB_ID for SP Status block
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_sb_info *sb_info,
+                                      void *sb_virt_addr,
+                                      dma_addr_t sb_phy_addr, u16 sb_id);
+/**
+ * @brief ecore_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      initialized sb_info structure
+ */
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
+
+/**
+ * @brief ecore_int_sb_release - releases the sb_info structure.
+ *
+ * once the structure is released, it's memory can be freed
+ *
+ * @param p_hwfn
+ * @param sb_info      points to an allocated sb_info structure
+ * @param sb_id                the sb_id to be used (zero based in driver)
+ *                     should never be equal to ECORE_SP_SB_ID
+ *                     (SP Status block)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_sb_info *sb_info,
+                                         u16 sb_id);
+
+/**
+ * @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
+ *        default status block.
+ *
+ * @param p_hwfn - pointer to hwfn
+ *
+ */
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
+
+/**
+ * @brief ecore_int_get_num_sbs - get the number of status
+ *        blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_sb_cnt_info
+ *
+ * @return
+ */
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+                          struct ecore_sb_cnt_info *p_sb_cnt_info);
+
+/**
+ * @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
+ *        release. The API need to be called after releasing all slowpath IRQs
+ *        of the device.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
new file mode 100644 (file)
index 0000000..dd53ea9
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __IRO_H__
+#define __IRO_H__
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET                (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE          (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+(IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE                  (IRO[1].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+(IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE                (IRO[3].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+(IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE              (IRO[4].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
+(IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE                   (IRO[5].size)
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
+(IRO[6].base + ((global_queue_id) * IRO[6].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE          (IRO[6].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET          (IRO[7].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE            (IRO[7].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET          (IRO[8].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE            (IRO[8].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET          (IRO[9].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE            (IRO[9].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET          (IRO[10].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE            (IRO[10].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET          (IRO[11].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE            (IRO[11].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET          (IRO[12].base)
+#define USTORM_INTEG_TEST_DATA_SIZE            (IRO[12].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+(IRO[17].base + ((stat_counter_id) * IRO[17].m1))
+#define MSTORM_QUEUE_STAT_SIZE                 (IRO[17].size)
+/* Mstorm producers */
+#define MSTORM_PRODS_OFFSET(queue_id) \
+(IRO[18].base + ((queue_id) * IRO[18].m1))
+#define MSTORM_PRODS_SIZE                      (IRO[18].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET           (IRO[19].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE             (IRO[19].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+(IRO[20].base + ((stat_counter_id) * IRO[20].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
+/* Ustorm queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+(IRO[21].base + ((queue_id) * IRO[21].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE             (IRO[21].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+(IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define PSTORM_QUEUE_STAT_SIZE                 (IRO[22].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET            (IRO[23].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE              (IRO[23].size)
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+(IRO[24].base + ((pf_id) * IRO[24].m1))
+#define ETH_RX_RATE_LIMIT_SIZE                 (IRO[24].size)
+/* Ystorm queue zone */
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+(IRO[25].base + ((queue_id) * IRO[25].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE             (IRO[25].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+(IRO[26].base + ((rss_id) * IRO[26].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE                        (IRO[26].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+(IRO[27].base + ((rss_id) * IRO[27].m1))
+#define USTORM_TOE_CQ_PROD_SIZE                        (IRO[27].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+(IRO[28].base + ((pf_id) * IRO[28].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE               (IRO[28].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+(IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE             (IRO[29].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+(IRO[30].base + ((func_id) * IRO[30].m1) + ((bdq_id) * IRO[30].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE          (IRO[30].size)
+/* Mstorm rq-cons of given queue-id */
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
+(IRO[31].base + ((rq_queue_id) * IRO[31].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE               (IRO[31].size)
+/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+(IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE          (IRO[32].size)
+
+#endif /* __IRO_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
new file mode 100644 (file)
index 0000000..c818b58
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __IRO_VALUES_H__
+#define __IRO_VALUES_H__
+
+/* Storm-RAM offset table indexed by the IRO_* accessor macros in
+ * ecore_iro.h.  Values are emitted by the firmware build; do not edit by
+ * hand.  Each entry presumably lays out { base, m1, m2, m3, size } in the
+ * order struct iro declares its fields - confirm against ecore.h.
+ */
+static const struct iro iro_arr[44] = {
+       {0x0, 0x0, 0x0, 0x0, 0x8},
+       {0x4db0, 0x60, 0x0, 0x0, 0x60},
+       {0x6418, 0x20, 0x0, 0x0, 0x20},
+       {0x500, 0x8, 0x0, 0x0, 0x4},
+       {0x480, 0x8, 0x0, 0x0, 0x4},
+       {0x0, 0x8, 0x0, 0x0, 0x2},
+       {0x80, 0x8, 0x0, 0x0, 0x2},
+       {0x4938, 0x0, 0x0, 0x0, 0x78},
+       {0x3df0, 0x0, 0x0, 0x0, 0x78},
+       {0x29b0, 0x0, 0x0, 0x0, 0x78},
+       {0x4d38, 0x0, 0x0, 0x0, 0x78},
+       {0x56c8, 0x0, 0x0, 0x0, 0x78},
+       {0x7e48, 0x0, 0x0, 0x0, 0x78},
+       {0xa28, 0x8, 0x0, 0x0, 0x8},
+       {0x61f8, 0x10, 0x0, 0x0, 0x10},
+       {0xb500, 0x30, 0x0, 0x0, 0x30},
+       {0x95b8, 0x30, 0x0, 0x0, 0x30},
+       {0x5898, 0x40, 0x0, 0x0, 0x40},
+       {0x1f8, 0x10, 0x0, 0x0, 0x8},
+       {0xa228, 0x0, 0x0, 0x0, 0x4},
+       {0x8050, 0x40, 0x0, 0x0, 0x30},
+       {0xcf8, 0x8, 0x0, 0x0, 0x8},
+       {0x2b48, 0x80, 0x0, 0x0, 0x38},
+       {0xadf0, 0x0, 0x0, 0x0, 0xf0},
+       {0xaee0, 0x8, 0x0, 0x0, 0x8},
+       {0x80, 0x8, 0x0, 0x0, 0x8},
+       {0xac0, 0x8, 0x0, 0x0, 0x8},
+       {0x2578, 0x8, 0x0, 0x0, 0x8},
+       {0x24f8, 0x8, 0x0, 0x0, 0x8},
+       {0x0, 0x8, 0x0, 0x0, 0x8},
+       {0x200, 0x10, 0x8, 0x0, 0x8},
+       {0x17f8, 0x8, 0x0, 0x0, 0x2},
+       {0x19f8, 0x10, 0x8, 0x0, 0x2},
+       {0xd988, 0x38, 0x0, 0x0, 0x24},
+       {0x11040, 0x10, 0x0, 0x0, 0x8},
+       {0x11670, 0x38, 0x0, 0x0, 0x18},
+       {0xaeb8, 0x30, 0x0, 0x0, 0x10},
+       {0x86f8, 0x28, 0x0, 0x0, 0x18},
+       {0xebf8, 0x10, 0x0, 0x0, 0x10},
+       {0xde08, 0x40, 0x0, 0x0, 0x30},
+       {0x121a0, 0x38, 0x0, 0x0, 0x8},
+       {0xf060, 0x20, 0x0, 0x0, 0x20},
+       {0x2b80, 0x80, 0x0, 0x0, 0x10},
+       {0x50a0, 0x10, 0x0, 0x0, 0x10},
+};
+
+#endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
new file mode 100644 (file)
index 0000000..d287a36
--- /dev/null
@@ -0,0 +1,1886 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_mcp.h"
+#include "mcp_public.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+
+#define CHIP_MCP_RESP_ITER_US 10
+#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
+
+#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)  /* Account for 5 sec */
+#define ECORE_MCP_RESET_RETRIES (50 * 1000)    /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+       ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+                _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+       ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+/* Write @_val to field @_field of this PF's driver mailbox.
+ * Fix: forward @_p_hwfn instead of the hard-coded 'p_hwfn'; the old
+ * expansion only compiled because every call site happens to name its
+ * hwfn variable 'p_hwfn' (compare the correct DRV_MB_RD below).
+ */
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+       DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
+                    OFFSETOF(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+       DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+                    OFFSETOF(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+       DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+#ifndef ASIC_ONLY
+static int loaded;
+static int loaded_port[MAX_NUM_PORTS] = { 0 };
+#endif
+
+/* An MCP/MFW channel exists only once mcp_info has been allocated and a
+ * non-zero shmem public base was discovered by ecore_load_mcp_offsets().
+ */
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
+{
+       return p_hwfn->mcp_info && p_hwfn->mcp_info->public_base;
+}
+
+/* Resolve and cache the shmem address of this hwfn's entry in the
+ * PUBLIC_PORT section; later port reads (link status, transceiver data)
+ * are made relative to mcp_info->port_addr.
+ */
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       /* The offsize word locates the port array inside shared memory */
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_PORT);
+       u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+
+       p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+                                                  MFW_PORT(p_hwfn));
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "port_addr = 0x%x, port_id 0x%02x\n",
+                  p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+/* Snapshot the MFW mailbox from shmem into mcp_info->mfw_mb_cur,
+ * converting each dword from big-endian shmem order to CPU order.
+ * No-op when no shmem base was discovered (no MFW).
+ */
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+       OSAL_BE32 tmp;
+       u32 i;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
+               return;
+#endif
+
+       /* MFW absent or offsets never loaded */
+       if (!p_hwfn->mcp_info->public_base)
+               return;
+
+       for (i = 0; i < length; i++) {
+               /* +sizeof(u32) skips the mailbox's leading word; presumably
+                * that word is the length read into mfw_mb_length by
+                * ecore_load_mcp_offsets() - confirm against the shmem layout.
+                */
+               tmp = ecore_rd(p_hwfn, p_ptt,
+                              p_hwfn->mcp_info->mfw_mb_addr +
+                              (i << 2) + sizeof(u32));
+
+               ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+                   OSAL_BE32_TO_CPU(tmp);
+       }
+}
+
+/* Release all MCP-related resources: the two mailbox shadow buffers, the
+ * mailbox spinlock, and finally the mcp_info struct itself.  Safe to call
+ * when mcp_info was never allocated (the unconditional OSAL_FREE of the
+ * original likewise relies on OSAL_FREE tolerating NULL).
+ */
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+
+       if (p_info != OSAL_NULL) {
+               OSAL_FREE(p_hwfn->p_dev, p_info->mfw_mb_cur);
+               OSAL_FREE(p_hwfn->p_dev, p_info->mfw_mb_shadow);
+               OSAL_SPIN_LOCK_DEALLOC(&p_info->lock);
+       }
+
+       OSAL_FREE(p_hwfn->p_dev, p_info);
+       p_hwfn->mcp_info = OSAL_NULL;
+
+       return ECORE_SUCCESS;
+}
+
+/* Discover the MFW shared-memory layout for this PF: public base address,
+ * driver and MFW mailbox addresses, the current mailbox/pulse sequence
+ * numbers, and the MCP history counter used to detect MCP resets.
+ * Returns ECORE_INVAL when no MFW is present (public_base left 0).
+ */
+static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
+                                                  struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+       u32 drv_mb_offsize, mfw_mb_offsize;
+       u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
+               p_info->public_base = 0;
+               return ECORE_INVAL;
+       }
+#endif
+
+       /* A zero shared-mem address means the MCP never set up shmem */
+       p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+       if (!p_info->public_base)
+               return ECORE_INVAL;
+
+       p_info->public_base |= GRCBASE_MCP;
+
+       /* Calculate the driver and MFW mailbox address */
+       drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+                                 SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                      PUBLIC_DRV_MB));
+       p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+                  drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+       /* Set the MFW MB address; the word at mfw_mb_addr is read as the
+        * mailbox length (ecore_mcp_read_mb skips it when copying).
+        */
+       mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+                                 SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                      PUBLIC_MFW_MB));
+       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+       p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+                                              p_info->mfw_mb_addr);
+
+       /* Get the current driver mailbox sequence before sending
+        * the first command
+        */
+       p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+           DRV_MSG_SEQ_NUMBER_MASK;
+
+       /* Get current FW pulse sequence */
+       p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+           DRV_PULSE_SEQ_MASK;
+
+       /* NOTE(review): only 16 bits of GENERIC_POR_0 are kept here, but
+        * ecore_do_mcp_cmd compares the stored value against a full 32-bit
+        * read - confirm the truncation cannot cause spurious mismatches.
+        */
+       p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
+                                         MISCS_REG_GENERIC_POR_0);
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate and initialize the per-hwfn MCP context: the mcp_info struct,
+ * the current/shadow MFW mailbox buffers and the mailbox spinlock.
+ * Succeeds (without buffers) when no MFW is present so the driver can run
+ * MCP-less; returns ECORE_NOMEM on allocation failure.
+ */
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_info *p_info;
+       u32 size;
+
+       /* Allocate mcp_info structure */
+       p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                      sizeof(*p_hwfn->mcp_info));
+       if (!p_hwfn->mcp_info)
+               goto err;
+       p_info = p_hwfn->mcp_info;
+
+       if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
+               /* Do not free mcp_info here, since public_base indicate that
+                * the MCP is not initialized
+                */
+               return ECORE_SUCCESS;
+       }
+
+       size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+       p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+       p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+       /* Check both shadow-buffer allocations.  The previous condition
+        * tested mfw_mb_addr - a shmem offset already validated by
+        * ecore_load_mcp_offsets - instead of the mfw_mb_cur allocation,
+        * so an OOM on mfw_mb_cur went undetected.
+        */
+       if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
+               goto err;
+
+       /* Initialize the MFW spinlock */
+       OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
+       OSAL_SPIN_LOCK_INIT(&p_info->lock);
+
+       return ECORE_SUCCESS;
+
+err:
+       DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
+       ecore_mcp_free(p_hwfn);
+       return ECORE_NOMEM;
+}
+
+/* Ask the MFW to reset itself: issue DRV_MSG_CODE_MCP_RESET with a fresh
+ * sequence number, then poll GENERIC_POR_0 - a change in its value is
+ * taken as evidence the MCP went through reset.  Returns ECORE_AGAIN if
+ * the register never changes within the retry budget.
+ */
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt)
+{
+       u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+       u32 delay = CHIP_MCP_RESP_ITER_US;
+       u32 org_mcp_reset_seq, cnt = 0;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               delay = EMUL_MCP_RESP_ITER_US;
+#endif
+
+       /* Serialize against other mailbox users for the whole exchange */
+       OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+
+       /* Set drv command along with the updated sequence */
+       org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
+
+       do {
+               /* Wait for MFW response */
+               OSAL_UDELAY(delay);
+               /* Give the MFW up to 500 msec (50*1000 iterations of 10us) */
+       } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
+                                               MISCS_REG_GENERIC_POR_0)) &&
+                (cnt++ < ECORE_MCP_RESET_RETRIES));
+
+       if (org_mcp_reset_seq !=
+           ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "MCP was reset after %d usec\n", cnt * delay);
+       } else {
+               DP_ERR(p_hwfn, "Failed to reset MCP\n");
+               rc = ECORE_AGAIN;
+       }
+
+       OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+
+       return rc;
+}
+
+/* Core mailbox transaction; must be called with mcp_info->lock held.
+ * Writes @param and then @cmd (tagged with a new sequence number) into
+ * the driver mailbox and polls the FW mailbox header until the sequence
+ * is echoed back or the retry budget expires.  On success *o_mcp_resp
+ * holds the FW response code and *o_mcp_param the FW parameter; on
+ * timeout *o_mcp_resp is zeroed and ECORE_AGAIN returned.
+ */
+static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 cmd, u32 param,
+                                            u32 *o_mcp_resp,
+                                            u32 *o_mcp_param)
+{
+       u32 delay = CHIP_MCP_RESP_ITER_US;
+       u32 seq, cnt = 1, actual_mb_seq;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               delay = EMUL_MCP_RESP_ITER_US;
+#endif
+
+       /* Get actual driver mailbox sequence */
+       actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+           DRV_MSG_SEQ_NUMBER_MASK;
+
+       /* Use MCP history register to check if MCP reset occurred between
+        * init time and now.
+        */
+       if (p_hwfn->mcp_info->mcp_hist !=
+           ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
+               ecore_load_mcp_offsets(p_hwfn, p_ptt);
+               ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+       }
+       seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+       /* Set drv param */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+       /* Set drv command along with the updated sequence */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "wrote command (%x) to MFW MB param 0x%08x\n",
+                  (cmd | seq), param);
+
+       do {
+               /* Wait for MFW response */
+               OSAL_UDELAY(delay);
+               *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+               /* Give the MFW up to 5 sec (500*1000 iterations of 10us) */
+       } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+                (cnt++ < ECORE_DRV_MB_MAX_RETRIES));
+
+       /* NOTE(review): cnt * delay is in usec although the text says ms */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+                  cnt * delay, *o_mcp_resp, seq);
+
+       /* Is this a reply to our command? */
+       if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+               *o_mcp_resp &= FW_MSG_CODE_MASK;
+               /* Get the MCP param */
+               *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+       } else {
+               /* FW BUG! */
+               DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
+                      cmd, param);
+               *o_mcp_resp = 0;
+               rc = ECORE_AGAIN;
+               ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
+       }
+       return rc;
+}
+
+/* Send a payload-less mailbox command to the MFW.
+ * On emulation builds there is no MFW: only the module-local load
+ * reference counters are maintained (see ecore_mcp_mf_workaround) and
+ * success is reported unconditionally.
+ */
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+                                  u32 *o_mcp_resp, u32 *o_mcp_param)
+{
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+                       loaded--;
+                       loaded_port[p_hwfn->port_id]--;
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
+                                  loaded);
+               }
+               return ECORE_SUCCESS;
+       }
+#endif
+
+       return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, OSAL_NULL,
+                                      o_mcp_resp, o_mcp_param);
+}
+
+/* Serialized mailbox transaction, optionally staging @p_union_data into
+ * the shmem union_data area before issuing @cmd.  The mcp_info spinlock
+ * guarantees a single thread owns the mailbox for the whole exchange.
+ * Returns ECORE_BUSY when the MCP channel was never initialized.
+ */
+enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 cmd, u32 param,
+                                            union drv_union_data *p_union_data,
+                                            u32 *o_mcp_resp,
+                                            u32 *o_mcp_param)
+{
+       u32 union_data_addr;
+       enum _ecore_status_t rc;
+
+       /* MCP not initialized */
+       if (!ecore_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+               return ECORE_BUSY;
+       }
+
+       /* Acquiring a spinlock is needed to ensure that only a single thread
+        * is accessing the mailbox at a certain time.
+        */
+       OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+
+       /* Copy the payload into the shmem union area before the command is
+        * written, so the MFW sees consistent data when it reacts.
+        */
+       if (p_union_data != OSAL_NULL) {
+               union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+                   OFFSETOF(struct public_drv_mb, union_data);
+               ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, p_union_data,
+                               sizeof(*p_union_data));
+       }
+
+       rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
+                             o_mcp_param);
+
+       OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+
+       return rc;
+}
+
+/* Issue a mailbox command carrying an @i_txn_size-byte payload taken
+ * from @i_buf (staged through union_data.raw_data).
+ * Returns ECORE_INVAL for oversized payloads: raw_data is a fixed-size
+ * mailbox buffer, and the previous unchecked OSAL_MEMCPY would overflow
+ * the on-stack union for i_txn_size > sizeof(raw_data).
+ */
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 cmd,
+                                         u32 param,
+                                         u32 *o_mcp_resp,
+                                         u32 *o_mcp_param,
+                                         u32 i_txn_size, u32 *i_buf)
+{
+       union drv_union_data union_data;
+
+       if (i_txn_size > sizeof(union_data.raw_data))
+               return ECORE_INVAL;
+
+       OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
+
+       return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, &union_data,
+                                      o_mcp_resp, o_mcp_param);
+}
+
+/* Issue a mailbox command and read back a buffer response: on success,
+ * *o_txn_size is taken from the MCP param and that many bytes are copied
+ * dword-by-dword out of union_data.raw_data into o_buf.
+ * NOTE(review): o_buf must be able to hold the MCP-reported size and no
+ * bound against sizeof(union_data.raw_data) is applied here - confirm
+ * callers and the MFW guarantee this.
+ */
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 cmd,
+                                         u32 param,
+                                         u32 *o_mcp_resp,
+                                         u32 *o_mcp_param,
+                                         u32 *o_txn_size, u32 *o_buf)
+{
+       enum _ecore_status_t rc;
+       u32 i;
+
+       /* MCP not initialized */
+       if (!ecore_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+               return ECORE_BUSY;
+       }
+
+       /* Lock is held across the read-back so no other command can
+        * overwrite union_data before we copy it out.
+        */
+       OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+       rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
+                             o_mcp_param);
+       if (rc != ECORE_SUCCESS)
+               goto out;
+
+       /* Get payload after operation completes successfully */
+       *o_txn_size = *o_mcp_param;
+       for (i = 0; i < *o_txn_size; i += 4)
+               o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt,
+                                                  union_data.raw_data[i]);
+
+out:
+       OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+       return rc;
+}
+
+#ifndef ASIC_ONLY
+/* Emulation-only stand-in for the MFW load handshake: derive the load
+ * phase from the module-local loaded/loaded_port counters instead of
+ * querying the (absent) management FW.
+ * Cleanup: load_phase was a 'static' local although it is unconditionally
+ * assigned before every use - the persistent storage was dead state.
+ */
+static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
+                                   u32 *p_load_code)
+{
+       u32 load_phase;
+
+       /* First function on the engine loads the engine, first on the
+        * port loads the port, everyone else is a function-level load.
+        */
+       if (!loaded)
+               load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+       else if (!loaded_port[p_hwfn->port_id])
+               load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
+       else
+               load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+
+       /* On CMT, always tell that it's engine */
+       if (p_hwfn->p_dev->num_hwfns > 1)
+               load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+       *p_load_code = load_phase;
+       loaded++;
+       loaded_port[p_hwfn->port_id]++;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
+                  *p_load_code, loaded, p_hwfn->port_id,
+                  loaded_port[p_hwfn->port_id]);
+}
+#endif
+
+/* Perform the LOAD_REQ handshake with the MFW: send the driver version
+ * string plus PDA/HSI identification and return the granted load phase
+ * (engine/port/function) in *p_load_code.  A missing response or an MFW
+ * refusal aborts the load flow.
+ */
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       u32 *p_load_code)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       union drv_union_data union_data;
+       u32 param;
+       enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+       /* No MFW on emulation - fabricate the load phase locally */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               ecore_mcp_mf_workaround(p_hwfn, p_load_code);
+               return ECORE_SUCCESS;
+       }
+#endif
+
+       OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
+
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
+                                    (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+                                     p_dev->drv_type),
+                                    &union_data, p_load_code, &param);
+
+       /* if mcp fails to respond we must abort */
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
+
+       /* If MFW refused (e.g. other port is in diagnostic mode) we
+        * must abort. This can happen in the following cases:
+        * - Other port is in diagnostic mode
+        * - Previously loaded function on the engine is not compliant with
+        *   the requester.
+        * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+        *      -
+        */
+       if (!(*p_load_code) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+               DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+               return ECORE_BUSY;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Read this engine's mcp_vf_disabled bitmap from the shmem PUBLIC_PATH
+ * section (the VFs the MFW has FLR-ed) and log it.
+ * NOTE(review): disabled_vfs[] is only printed here, never acted upon -
+ * presumably IOV handling consumes it in a later patch; confirm.
+ */
+static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_PATH);
+       u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+       u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+                                    ECORE_PATH_ID(p_hwfn));
+       u32 disabled_vfs[VF_MAX_STATIC / 32];
+       int i;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "Reading Disabled VF information from [offset %08x],"
+                  " path_addr %08x\n",
+                  mfw_path_offsize, path_addr);
+
+       /* One 32-bit word per 32 VFs */
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+               disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
+                                          path_addr +
+                                          OFFSETOF(struct public_path,
+                                                   mcp_vf_disabled) +
+                                          sizeof(u32) * i);
+               DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+                          "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+                          i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+       }
+}
+
+/* Tell the MFW which FLR-ed VFs the driver has finished handling
+ * (VF_DISABLED_DONE with the @vfs_to_ack bitmap as payload), then clear
+ * the drv_ack_vf_disabled words in shmem ourselves - marked TMP below
+ * because the MFW should eventually do the clearing.
+ */
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 *vfs_to_ack)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+                                    MCP_PF_ID(p_hwfn));
+       union drv_union_data union_data;
+       u32 resp, param;
+       enum _ecore_status_t rc;
+       int i;
+
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+                          "Acking VFs [%08x,...,%08x] - %08x\n",
+                          i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+       OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
+                                    DRV_MSG_CODE_VF_DISABLED_DONE, 0,
+                                    &union_data, &resp, &param);
+       if (rc != ECORE_SUCCESS) {
+               /* NOTE(review): DP_NOTICE's second argument is a bool in the
+                * other call sites of this file; a verbosity mask is passed
+                * here (nonzero, so it acts as 'true') - confirm intent.
+                */
+               DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+                         "Failed to pass ACK for VF flr to MFW\n");
+               return ECORE_TIMEOUT;
+       }
+
+       /* TMP - clear the ACK bits; should be done by MFW */
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               ecore_wr(p_hwfn, p_ptt,
+                        func_addr +
+                        OFFSETOF(struct public_func, drv_ack_vf_disabled) +
+                        i * sizeof(u32), 0);
+
+       return rc;
+}
+
+/* Read the transceiver_data word from this port's shmem area, extract the
+ * transceiver-state field and report present/unplugged.
+ */
+static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_ptt *p_ptt)
+{
+       u32 state_addr = p_hwfn->mcp_info->port_addr +
+                        OFFSETOF(struct public_port, transceiver_data);
+       u32 transceiver_state = ecore_rd(p_hwfn, p_ptt, state_addr);
+
+       DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
+                  "Received transceiver state update [0x%08x] from mfw"
+                  "[Addr 0x%x]\n",
+                  transceiver_state, state_addr);
+
+       transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE);
+
+       if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+               DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
+       else
+               DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+}
+
+/* Refresh mcp_info->link_output from the port's shmem link_status word,
+ * apply min/max bandwidth corrections to the reported speed, and notify
+ * the OS layer via OSAL_LINK_UPDATE.  When @b_reset is set, only clear
+ * the cached link indications and return.
+ */
+static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt, bool b_reset)
+{
+       struct ecore_mcp_link_state *p_link;
+       u32 status = 0;
+
+       /* link_output starts from a clean slate on every update */
+       p_link = &p_hwfn->mcp_info->link_output;
+       OSAL_MEMSET(p_link, 0, sizeof(*p_link));
+       if (!b_reset) {
+               status = ecore_rd(p_hwfn, p_ptt,
+                                 p_hwfn->mcp_info->port_addr +
+                                 OFFSETOF(struct public_port, link_status));
+               DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
+                          "Received link update [0x%08x] from mfw"
+                          " [Addr 0x%x]\n",
+                          status, (u32)(p_hwfn->mcp_info->port_addr +
+                                         OFFSETOF(struct public_port,
+                                                  link_status)));
+       } else {
+               /* Reset path: the zeroed indications are left in place */
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Resetting link indications\n");
+               return;
+       }
+
+       /* Only trust link-up once the driver asked for link
+        * (b_drv_link_init is set by ecore_mcp_set_link)
+        */
+       if (p_hwfn->b_drv_link_init)
+               p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+       else
+               p_link->link_up = false;
+
+       /* Decode negotiated speed/duplex; only 1G-HD is half duplex */
+       p_link->full_duplex = true;
+       switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+       case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+               p_link->speed = 100000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+               p_link->speed = 50000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+               p_link->speed = 40000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+               p_link->speed = 25000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+               p_link->speed = 20000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+               p_link->speed = 10000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+               p_link->full_duplex = false;
+               /* Fall-through */
+       case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+               p_link->speed = 1000;
+               break;
+       default:
+               p_link->speed = 0;
+       }
+
+       /* We never store total line speed as p_link->speed is
+        * again changes according to bandwidth allocation.
+        */
+       if (p_link->link_up && p_link->speed)
+               p_link->line_speed = p_link->speed;
+       else
+               p_link->line_speed = 0;
+
+       /* Correct speed according to bandwidth allocation */
+       if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
+               u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+
+               __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+                                                  p_link, max_bw);
+       }
+
+       if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) {
+               u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+               __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+                                                  p_link, min_bw);
+
+               /* Propagate the new minimum rate to the vport WFQ config */
+               ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
+                                                     p_link->min_pf_rate);
+       }
+
+       /* Autoneg / flow-control / partner-capability indications */
+       p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+       p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+       p_link->parallel_detection = !!(status &
+                                        LINK_STATUS_PARALLEL_DETECTION_USED);
+       p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_10G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_20G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_25G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_40G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_50G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_100G : 0;
+
+       p_link->partner_tx_flow_ctrl_en =
+           !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+       p_link->partner_rx_flow_ctrl_en =
+           !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+       switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+       case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+               p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
+               break;
+       case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+               p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
+               break;
+       case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+               p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
+               break;
+       default:
+               p_link->partner_adv_pause = 0;
+       }
+
+       p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+       OSAL_LINK_UPDATE(p_hwfn);
+}
+
+/* Translate the cached link_input parameters into a pmm_phy_cfg payload
+ * and send DRV_MSG_CODE_INIT_PHY (@b_up) or DRV_MSG_CODE_LINK_RESET to
+ * the MFW.  On link-down, the cached link indications are also cleared.
+ */
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, bool b_up)
+{
+       struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+       union drv_union_data union_data;
+       struct pmm_phy_cfg *p_phy_cfg;
+       u32 param = 0, reply = 0, cmd;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               return ECORE_SUCCESS;
+#endif
+
+       /* Set the shmem configuration according to params */
+       p_phy_cfg = &union_data.drv_phy_cfg;
+       OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
+       cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+       /* A forced speed is only meaningful when autoneg is off */
+       if (!params->speed.autoneg)
+               p_phy_cfg->speed = params->speed.forced_speed;
+       p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+       p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+       p_phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+       p_phy_cfg->adv_speed = params->speed.advertised_speeds;
+       p_phy_cfg->loopback_mode = params->loopback_mode;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               DP_INFO(p_hwfn,
+                       "Link on FPGA - Ask for loopback mode '5' at 10G\n");
+               p_phy_cfg->loopback_mode = 5;
+               p_phy_cfg->speed = 10000;
+       }
+#endif
+
+       /* Lets ecore_mcp_handle_link_change trust link-up indications */
+       p_hwfn->b_drv_link_init = b_up;
+
+       if (b_up)
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
+                          " adv_speed 0x%08x, loopback 0x%08x,"
+                          " features 0x%08x\n",
+                          p_phy_cfg->speed, p_phy_cfg->pause,
+                          p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
+                          p_phy_cfg->feature_config_flags);
+       else
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
+
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, 0, &union_data, &reply,
+                                    &param);
+
+       /* if mcp fails to respond we must abort */
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
+
+       /* Reset the link status if needed */
+       if (!b_up)
+               ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+       return rc;
+}
+
+/* Read the device's process-kill counter from the PUBLIC_PATH shmem
+ * section of this engine.
+ */
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt)
+{
+       u32 offsize_addr, offsize, base_addr;
+
+       /* Locate this engine's PUBLIC_PATH section in the MFW shmem */
+       offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                           PUBLIC_PATH);
+       offsize = ecore_rd(p_hwfn, p_ptt, offsize_addr);
+       base_addr = SECTION_ADDR(offsize, ECORE_PATH_ID(p_hwfn));
+
+       /* The counter occupies only the masked part of the dword */
+       return ecore_rd(p_hwfn, p_ptt,
+                       base_addr + OFFSETOF(struct public_path,
+                                            process_kill)) &
+              PROCESS_KILL_COUNTER_MASK;
+}
+
+/* MFW notification that a fatal error requires a process-kill recovery.
+ * Interrupts are masked on every hwfn immediately; the recovery handler
+ * itself is scheduled only once, by the leading hwfn, and only if no
+ * recovery flow is already running.
+ */
+static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       u32 proc_kill_cnt;
+
+       /* Prevent possible attentions/interrupts during the recovery handling
+        * and till its load phase, during which they will be re-enabled.
+        */
+       ecore_int_igu_disable_int(p_hwfn, p_ptt);
+
+       DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
+
+       /* The following operations should be done once, and thus in CMT mode
+        * are carried out by only the first HW function.
+        */
+       if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
+               return;
+
+       if (p_dev->recov_in_prog) {
+               DP_NOTICE(p_hwfn, false,
+                         "Ignoring the indication since a recovery"
+                         " process is already in progress\n");
+               return;
+       }
+
+       p_dev->recov_in_prog = true;
+
+       proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
+       DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
+
+       /* Defer the actual recovery flow to the OSAL layer's context */
+       OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
+}
+
+/* Collect protocol statistics from the upper layer (via OSAL) and push
+ * them to the MFW.  Only LAN statistics are supported by this driver;
+ * any other request type is rejected with a notice.
+ */
+static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         enum MFW_DRV_MSG_TYPE type)
+{
+       enum ecore_mcp_protocol_type stats_type;
+       union ecore_mcp_protocol_stats stats;
+       u32 hsi_param, param = 0, reply = 0;
+       union drv_union_data union_data;
+
+       switch (type) {
+       case MFW_DRV_MSG_GET_LAN_STATS:
+               stats_type = ECORE_MCP_LAN_STATS;
+               hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
+               return;
+       }
+
+       OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
+
+       /* NOTE(review): assumes the protocol-stats union fits within
+        * drv_union_data - confirm against the HSI definitions.
+        */
+       OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
+
+       ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS,
+                               hsi_param, &union_data, &reply, &param);
+}
+
+/* Copy the shmem function section of the given PF into p_data (zeroed
+ * first).  Returns the number of bytes actually copied.
+ */
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   struct public_func *p_data, int pfid)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+       u32 *p_dst = (u32 *)p_data;
+       u32 dword, size;
+
+       OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+       /* Never copy more than either the shmem section or the struct holds */
+       size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize));
+       for (dword = 0; dword < size / sizeof(u32); dword++)
+               p_dst[dword] = ecore_rd(p_hwfn, p_ptt,
+                                       func_addr + (dword << 2));
+
+       return size;
+}
+
+/* Extract the PF's min/max bandwidth limits from its shmem config word
+ * and clamp them into the valid [1, 100] range.
+ */
+static void
+ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
+                       struct public_func *p_shmem_info)
+{
+       struct ecore_mcp_function_info *p_info = &p_hwfn->mcp_info->func_info;
+
+       /* TODO - bandwidth min/max should have valid values of 1-100,
+        * as well as some indication that the feature is disabled.
+        * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
+        * limit and correct value to min `1' and max `100' if limit isn't in
+        * range.
+        */
+       p_info->bandwidth_min = (p_shmem_info->config &
+                                FUNC_MF_CFG_MIN_BW_MASK) >>
+                               FUNC_MF_CFG_MIN_BW_SHIFT;
+       if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+               DP_INFO(p_hwfn,
+                       "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+                       p_info->bandwidth_min);
+               p_info->bandwidth_min = 1;
+       }
+
+       p_info->bandwidth_max = (p_shmem_info->config &
+                                FUNC_MF_CFG_MAX_BW_MASK) >>
+                               FUNC_MF_CFG_MAX_BW_SHIFT;
+       if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+               DP_INFO(p_hwfn,
+                       "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+                       p_info->bandwidth_max);
+               p_info->bandwidth_max = 100;
+       }
+}
+
+/* MFW notification that the PF bandwidth limits changed: re-read them
+ * from shmem, apply them, and acknowledge the update.
+ */
+static void
+ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_function_info *p_info = &p_hwfn->mcp_info->func_info;
+       struct public_func shmem_info;
+       u32 resp = 0, param = 0;
+
+       /* Refresh the cached limits from the PF's shmem section */
+       ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+       ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+       /* Apply both directions of the rate limit */
+       ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
+       ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
+
+       /* Acknowledge the MFW */
+       ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+                     &param);
+}
+
+/* MFW reported a fan failure; the card is about to shut down.  Notify
+ * the upper driver - once per device (leading hwfn only in CMT mode).
+ */
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt)
+{
+       /* A single notification should be sent to upper driver in CMT mode */
+       if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
+               return;
+
+       DP_NOTICE(p_hwfn, false,
+                 "Fan failure was detected on the network interface card"
+                 " and it's going to be shut down.\n");
+
+       ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
+}
+
+/* Central dispatcher for MFW->driver mailbox messages.  Reads the
+ * mailbox, compares it against the shadow copy to find new messages,
+ * dispatches each one, acknowledges the whole mailbox back to the MFW
+ * and finally updates the shadow.
+ *
+ * Returns ECORE_INVAL if an unknown message was seen or no new message
+ * was found at all; ECORE_SUCCESS otherwise.
+ */
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_info *info = p_hwfn->mcp_info;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       bool found = false;
+       u16 i;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
+
+       /* Read Messages from MFW */
+       ecore_mcp_read_mb(p_hwfn, p_ptt);
+
+       /* Compare current messages to old ones */
+       for (i = 0; i < info->mfw_mb_length; i++) {
+               if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+                       continue;
+
+               found = true;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+                          i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+               /* The message index doubles as the MFW_DRV_MSG_* type */
+               switch (i) {
+               case MFW_DRV_MSG_LINK_CHANGE:
+                       ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
+                       break;
+               case MFW_DRV_MSG_VF_DISABLED:
+                       ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_ERROR_RECOVERY:
+                       ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_GET_LAN_STATS:
+               case MFW_DRV_MSG_GET_FCOE_STATS:
+               case MFW_DRV_MSG_GET_ISCSI_STATS:
+               case MFW_DRV_MSG_GET_RDMA_STATS:
+                       /* Non-LAN types are rejected inside the helper */
+                       ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+                       break;
+               case MFW_DRV_MSG_BW_UPDATE:
+                       ecore_mcp_update_bw(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+                       ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_FAILURE_DETECTED:
+                       ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+                       break;
+               default:
+                       /* @DPDK */
+                       DP_NOTICE(p_hwfn, false,
+                                 "Unimplemented MFW message %d\n", i);
+                       rc = ECORE_INVAL;
+               }
+       }
+
+       /* ACK everything */
+       for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+               OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
+
+               /* MFW expect answer in BE, so we force write in that format */
+               /* NOTE(review): ACK dwords are assumed to live right after
+                * the header dword and the message dwords in the mailbox -
+                * confirm the layout against mcp_public.h.
+                */
+               ecore_wr(p_hwfn, p_ptt,
+                        info->mfw_mb_addr + sizeof(u32) +
+                        MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+                        sizeof(u32) + i * sizeof(u32), val);
+       }
+
+       if (!found) {
+               DP_NOTICE(p_hwfn, false,
+                         "Received an MFW message indication but no"
+                         " new message!\n");
+               rc = ECORE_INVAL;
+       }
+
+       /* Copy the new mfw messages into the shadow */
+       OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+       return rc;
+}
+
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
+                                          struct ecore_ptt *p_ptt,
+                                          u32 *p_mfw_ver,
+                                          u32 *p_running_bundle_id)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       u32 global_offsize;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_dev)) {
+               DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n");
+               return ECORE_SUCCESS;
+       }
+#endif
+
+       global_offsize = ecore_rd(p_hwfn, p_ptt,
+                                 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+                                                      public_base,
+                                                      PUBLIC_GLOBAL));
+       *p_mfw_ver =
+           ecore_rd(p_hwfn, p_ptt,
+                    SECTION_ADDR(global_offsize,
+                                 0) + OFFSETOF(struct public_global, mfw_ver));
+
+       if (p_running_bundle_id != OSAL_NULL) {
+               *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
+                                               SECTION_ADDR(global_offsize,
+                                                            0) +
+                                               OFFSETOF(struct public_global,
+                                                        running_bundle_id));
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Report the port's media type from shmem.  Returns ECORE_BUSY when the
+ * MCP is not initialized yet or no PTT window is free.
+ */
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+                                             u32 *p_media_type)
+{
+       struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
+       struct ecore_ptt *p_ptt;
+
+       if (!ecore_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+               return ECORE_BUSY;
+       }
+
+       /* Default answer until the port section is actually read */
+       *p_media_type = MEDIA_UNSPECIFIED;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (p_ptt == OSAL_NULL)
+               return ECORE_BUSY;
+
+       *p_media_type = ecore_rd(p_hwfn, p_ptt,
+                                p_hwfn->mcp_info->port_addr +
+                                OFFSETOF(struct public_port, media_type));
+
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return ECORE_SUCCESS;
+}
+
+/* Map the MFW protocol configuration onto a driver personality.
+ * Returns ECORE_INVAL for protocols this driver does not support.
+ */
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
+                         struct public_func *p_info,
+                         enum ecore_pci_personality *p_proto)
+{
+       switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+       case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+               *p_proto = ECORE_PCI_ETH;
+               return ECORE_SUCCESS;
+       default:
+               return ECORE_INVAL;
+       }
+}
+
+/* Populate p_hwfn->mcp_info->func_info from the PF's shmem function
+ * section: pause-on-host, personality, bandwidth limits, MAC and OV
+ * stag.  Returns ECORE_INVAL if the configured protocol is unknown.
+ */
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_function_info *info;
+       struct public_func shmem_info;
+
+       ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+       info = &p_hwfn->mcp_info->func_info;
+
+       info->pause_on_host = (shmem_info.config &
+                              FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+       if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
+               DP_ERR(p_hwfn, "Unknown personality %08x\n",
+                      (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+               return ECORE_INVAL;
+       }
+
+       ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+       if (shmem_info.mac_upper || shmem_info.mac_lower) {
+               /* The MAC is split big-endian-wise across two shmem words */
+               info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+               info->mac[1] = (u8)(shmem_info.mac_upper);
+               info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+               info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+               info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+               info->mac[5] = (u8)(shmem_info.mac_lower);
+       } else {
+               /* TODO - are there protocols for which there's no MAC? */
+               DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
+       }
+
+       info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+       /* NOTE(review): wwn_port/wwn_node are printed below but never set in
+        * this function - presumably only relevant for storage
+        * personalities; confirm they are initialized elsewhere.
+        */
+       DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
+                  "Read configuration from shmem: pause_on_host %02x"
+                   " protocol %02x BW [%02x - %02x]"
+                   " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
+                   " node %lx ovlan %04x\n",
+                  info->pause_on_host, info->protocol,
+                  info->bandwidth_min, info->bandwidth_max,
+                  info->mac[0], info->mac[1], info->mac[2],
+                  info->mac[3], info->mac[4], info->mac[5],
+                  info->wwn_port, info->wwn_node, info->ovlan);
+
+       return ECORE_SUCCESS;
+}
+
+/* Accessor for the link configuration input block; NULL until the MCP
+ * info structure has been allocated.
+ */
+struct ecore_mcp_link_params
+*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
+{
+       return (p_hwfn && p_hwfn->mcp_info) ? &p_hwfn->mcp_info->link_input
+                                           : OSAL_NULL;
+}
+
+/* Accessor for the current link state; NULL until the MCP info
+ * structure has been allocated.
+ */
+struct ecore_mcp_link_state
+*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
+{
+       if (p_hwfn == OSAL_NULL || p_hwfn->mcp_info == OSAL_NULL)
+               return OSAL_NULL;
+
+#ifndef ASIC_ONLY
+       /* Emulation/FPGA have no real MFW link flow - force link-up */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
+               p_hwfn->mcp_info->link_output.link_up = true;
+       }
+#endif
+
+       return &p_hwfn->mcp_info->link_output;
+}
+
+/* Accessor for the link capabilities block; NULL until the MCP info
+ * structure has been allocated.
+ */
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
+{
+       return (p_hwfn && p_hwfn->mcp_info)
+                  ? &p_hwfn->mcp_info->link_capabilities
+                  : OSAL_NULL;
+}
+
+/* Ask the MFW to drain the NIG, then give the hardware time to finish
+ * before returning the mailbox status.
+ */
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt)
+{
+       u32 resp = 0, param = 0;
+       enum _ecore_status_t rc;
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt,
+                          DRV_MSG_CODE_NIG_DRAIN, 100, &resp, &param);
+
+       /* Wait for the drain to complete before returning */
+       OSAL_MSLEEP(120);
+
+       return rc;
+}
+
+/* Accessor for the cached per-function information; NULL until the MCP
+ * info structure has been allocated.
+ */
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
+{
+       return (p_hwfn && p_hwfn->mcp_info) ? &p_hwfn->mcp_info->func_info
+                                           : OSAL_NULL;
+}
+
+/* Dispatch an NVM request to the mailbox helper matching its type.
+ * Returns ECORE_NOTIMPL for unknown request types.
+ */
+enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_ptt *p_ptt,
+                                          struct ecore_mcp_nvm_params *params)
+{
+       switch (params->type) {
+       case ECORE_MCP_NVM_RD:
+               return ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+                                           params->nvm_common.cmd,
+                                           params->nvm_common.offset,
+                                           &params->nvm_common.resp,
+                                           &params->nvm_common.param,
+                                           params->nvm_rd.buf_size,
+                                           params->nvm_rd.buf);
+       case ECORE_MCP_CMD:
+               return ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
+                                    params->nvm_common.offset,
+                                    &params->nvm_common.resp,
+                                    &params->nvm_common.param);
+       case ECORE_MCP_NVM_WR:
+               return ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+                                           params->nvm_common.cmd,
+                                           params->nvm_common.offset,
+                                           &params->nvm_common.resp,
+                                           &params->nvm_common.param,
+                                           params->nvm_wr.buf_size,
+                                           params->nvm_wr.buf);
+       default:
+               return ECORE_NOTIMPL;
+       }
+}
+
+/* Count the visible PFs on this engine whose personality matches one of
+ * the bits in the 'personalities' bitmap.
+ */
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_ptt *p_ptt, u32 personalities)
+{
+       enum ecore_pci_personality proto = ECORE_PCI_DEFAULT;
+       struct public_func shmem_info;
+       int pf, count = 0, num_pfs;
+
+       num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+       for (pf = 0; pf < num_pfs; pf++) {
+               ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                                        MCP_PF_ID_BY_REL(p_hwfn, pf));
+
+               /* Skip hidden PFs and PFs with an unknown protocol */
+               if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+                       continue;
+               if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+                                             &proto) != ECORE_SUCCESS)
+                       continue;
+
+               if (personalities & (1 << ((u32)proto)))
+                       count++;
+       }
+
+       return count;
+}
+
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u32 *p_flash_size)
+{
+       u32 flash_size;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
+               return ECORE_INVAL;
+       }
+#endif
+
+       flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+       flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+           MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+       flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+       *p_flash_size = flash_size;
+
+       return ECORE_SUCCESS;
+}
+
+/* Trigger a device recovery by asserting general attention bit 35,
+ * which the MFW turns into a process-kill indication for all drivers.
+ * Returns ECORE_AGAIN if a recovery flow is already running.
+ */
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+       if (p_dev->recov_in_prog) {
+               DP_NOTICE(p_hwfn, false,
+                         "Avoid triggering a recovery since such a process"
+                         " is already in progress\n");
+               return ECORE_AGAIN;
+       }
+
+       DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+       return ECORE_SUCCESS;
+}
+
+/* Ask the MFW to configure 'num' MSI-X vectors (status blocks) for the
+ * given VF.  Returns ECORE_INVAL if the MFW does not confirm.
+ */
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u8 vf_id, u8 num)
+{
+       u32 resp = 0, param = 0, rc_param = 0;
+       enum _ecore_status_t rc;
+
+       /* Pack the VF index and the requested SB count into one parameter */
+       param = (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+               DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+       param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+                DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+                          &resp, &rc_param);
+
+       if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+               DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
+                         vf_id);
+               rc = ECORE_INVAL;
+       }
+
+       return rc;
+}
+
+/* Report the driver version (numeric value plus name string) to the
+ * MFW.  The name is copied into the mailbox union as big-endian dwords.
+ */
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                          struct ecore_mcp_drv_version *p_ver)
+{
+       u32 param = 0, reply = 0, num_words, i;
+       struct drv_version_stc *p_drv_version;
+       union drv_union_data union_data;
+       void *p_name;
+       OSAL_BE32 val;
+       enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+       /* Emulation/FPGA have no MFW to report to */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+               return ECORE_SUCCESS;
+#endif
+
+       p_drv_version = &union_data.drv_version;
+       p_drv_version->version = p_ver->version;
+       /* NOTE(review): copies the name as u32 reads; assumes p_ver->name
+        * holds at least MCP_DRV_VER_STR_SIZE - 4 bytes and is suitably
+        * aligned for u32 access - confirm.
+        */
+       num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
+       for (i = 0; i < num_words; i++) {
+               p_name = &p_ver->name[i * sizeof(u32)];
+               val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
+               *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+       }
+
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0,
+                                    &union_data, &reply, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* Ask the management CPU to halt itself (paired with ecore_mcp_resume).
+ */
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       u32 resp = 0, param = 0;
+       enum _ecore_status_t rc;
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+                          &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* Take the management CPU out of the soft-halt state entered via
+ * ecore_mcp_halt().  Clears the halt bit in the CPU mode register and
+ * verifies it actually de-asserted.
+ *
+ * Returns ECORE_SUCCESS if the MCP resumed, ECORE_INVAL otherwise.
+ * (Fix: previously returned raw -1/0 through enum _ecore_status_t
+ * instead of the proper enum constants.)
+ */
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt)
+{
+       u32 value, cpu_mode;
+
+       /* Clear any pending CPU state before releasing the halt */
+       ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+       value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+       value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+       ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
+
+       /* Read back to make sure the halt bit really cleared */
+       cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+       if (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT)
+               return ECORE_INVAL;
+
+       return ECORE_SUCCESS;
+}
+
+/* Inform the MFW which client (OS driver / userspace tool) owns the
+ * current device configuration.
+ *
+ * @param config - configuration method (not consumed by the mailbox)
+ * @param client - the entity taking ownership of the configuration
+ *
+ * Returns the mailbox status; ECORE_INVAL for an unknown client.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  enum ecore_ov_config_method config,
+                                  enum ecore_ov_client client)
+{
+       enum _ecore_status_t rc;
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+
+       /* Fix: the switch must inspect 'client' - the case labels are
+        * ecore_ov_client values, not ecore_ov_config_method ones, and the
+        * error message below already reports an invalid *client* type.
+        */
+       switch (client) {
+       case ECORE_OV_CLIENT_DRV:
+               drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
+               break;
+       case ECORE_OV_CLIENT_USER:
+               drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
+               return ECORE_INVAL;
+       }
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
+                          drv_mb_param, &resp, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* Report the driver's load state (not loaded / disabled / active) to
+ * the MFW.
+ *
+ * Returns the mailbox status; ECORE_INVAL for an unknown state.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt,
+                                enum ecore_ov_driver_state drv_state)
+{
+       enum _ecore_status_t rc;
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+
+       /* Translate the abstract state into the mailbox encoding */
+       switch (drv_state) {
+       case ECORE_OV_DRIVER_STATE_NOT_LOADED:
+               drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
+               break;
+       case ECORE_OV_DRIVER_STATE_DISABLED:
+               drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
+               break;
+       case ECORE_OV_DRIVER_STATE_ACTIVE:
+               drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
+               return ECORE_INVAL;
+       }
+
+       /* Fix: send the translated drv_mb_param - previously the raw
+        * drv_state enum was sent and drv_mb_param was computed but unused.
+        */
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
+                          drv_mb_param, &resp, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* FC NPIV is not applicable to this Ethernet-only driver; the stub just
+ * keeps the ecore API surface complete.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                        struct ecore_fc_npiv_tbl *p_table)
+{
+       return ECORE_SUCCESS;
+}
+
+/* MTU reporting to the MFW is not implemented yet; the stub keeps the
+ * ecore API surface complete.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, u16 mtu)
+{
+       return ECORE_SUCCESS;
+}
+
+/* Drive the board LED on/off, or hand control back to the MFW
+ * ("operational" mode).  Returns ECORE_INVAL for an unknown mode.
+ */
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      enum ecore_led_mode mode)
+{
+       u32 resp = 0, param = 0, mode_param;
+       enum _ecore_status_t rc;
+
+       /* Translate the abstract LED mode into the mailbox encoding */
+       switch (mode) {
+       case ECORE_LED_MODE_ON:
+               mode_param = DRV_MB_PARAM_SET_LED_MODE_ON;
+               break;
+       case ECORE_LED_MODE_OFF:
+               mode_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
+               break;
+       case ECORE_LED_MODE_RESTORE:
+               mode_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
+               return ECORE_INVAL;
+       }
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
+                          mode_param, &resp, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* Ask the MFW to mask the given parity attentions.  An old MFW that
+ * pre-dates this command NACKs it, which is reported as ECORE_INVAL.
+ */
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 mask_parities)
+{
+       u32 resp = 0, param = 0;
+       enum _ecore_status_t rc;
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+                          mask_parities, &resp, &param);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn,
+                      "MCP response failure for mask parities, aborting\n");
+       } else if (resp != FW_MSG_CODE_OK) {
+               DP_ERR(p_hwfn,
+                      "MCP did not ack mask parity request. Old MFW?\n");
+               rc = ECORE_INVAL;
+       }
+
+       return rc;
+}
+
+/* Read 'len' bytes from NVM address 'addr' into p_buf, chunking the
+ * transfer by the mailbox buffer size (MCP_DRV_NVM_BUF_LEN).  The raw
+ * MFW response is cached for ecore_mcp_nvm_resp().
+ */
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+                                       u8 *p_buf, u32 len)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       u32 bytes_left, offset, bytes_to_copy, buf_size;
+       struct ecore_mcp_nvm_params params;
+       struct ecore_ptt *p_ptt;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       bytes_left = len;
+       offset = 0;
+       params.type = ECORE_MCP_NVM_RD;
+       params.nvm_rd.buf_size = &buf_size;
+       params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
+       /* NOTE(review): loop termination relies on the MFW returning a
+        * non-zero buf_size on every successful chunk - confirm.
+        */
+       while (bytes_left > 0) {
+               bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+                                          MCP_DRV_NVM_BUF_LEN);
+               /* The chunk length rides in the upper bits of the offset */
+               params.nvm_common.offset = (addr + offset) |
+                   (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
+               params.nvm_rd.buf = (u32 *)(p_buf + offset);
+               rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+               if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
+                                           FW_MSG_CODE_NVM_OK)) {
+                       DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+                       break;
+               }
+               offset += *params.nvm_rd.buf_size;
+               bytes_left -= *params.nvm_rd.buf_size;
+       }
+
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Issue a single PHY core/raw read of 'len' bytes at 'addr' into p_buf.
+ * The raw MFW response is cached for ecore_mcp_nvm_resp().
+ */
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+                                       u32 addr, u8 *p_buf, u32 len)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params nvm_params;
+       struct ecore_ptt *p_ptt;
+       enum _ecore_status_t rc;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (p_ptt == OSAL_NULL)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&nvm_params, 0, sizeof(struct ecore_mcp_nvm_params));
+       nvm_params.type = ECORE_MCP_NVM_RD;
+       nvm_params.nvm_rd.buf_size = &len;
+       if (cmd == ECORE_PHY_CORE_READ)
+               nvm_params.nvm_common.cmd = DRV_MSG_CODE_PHY_CORE_READ;
+       else
+               nvm_params.nvm_common.cmd = DRV_MSG_CODE_PHY_RAW_READ;
+       nvm_params.nvm_common.offset = addr;
+       nvm_params.nvm_rd.buf = (u32 *)p_buf;
+
+       rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &nvm_params);
+       if (rc != ECORE_SUCCESS)
+               DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+
+       p_dev->mcp_nvm_resp = nvm_params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Copy the last cached NVM mailbox response code into the caller's
+ * buffer (sizeof(p_dev->mcp_nvm_resp) bytes).
+ *
+ * Returns ECORE_BUSY if no PTT window is free, else ECORE_SUCCESS.
+ * (Fix: removed a dead ecore_mcp_nvm_params local that was only ever
+ * zeroed and never used.)
+ */
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_ptt *p_ptt;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return ECORE_SUCCESS;
+}
+
+/* Ask the MFW to delete the NVM file identified by 'addr'.  The raw
+ * MFW response is cached for ecore_mcp_nvm_resp().
+ */
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params nvm_params;
+       struct ecore_ptt *p_ptt;
+       enum _ecore_status_t rc;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (p_ptt == OSAL_NULL)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&nvm_params, 0, sizeof(struct ecore_mcp_nvm_params));
+       nvm_params.type = ECORE_MCP_CMD;
+       nvm_params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
+       nvm_params.nvm_common.offset = addr;
+       rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &nvm_params);
+
+       p_dev->mcp_nvm_resp = nvm_params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Tell the MFW to begin a file transfer at NVM address 'addr'
+ * (DRV_MSG_CODE_NVM_PUT_FILE_BEGIN). The raw MFW response is cached
+ * in p_dev->mcp_nvm_resp.
+ * Returns ECORE_BUSY if no PTT window is available, otherwise the
+ * mailbox command status.
+ */
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+                                                 u32 addr)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params nvm_params;
+       enum _ecore_status_t rc;
+       struct ecore_ptt *p_ptt;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       /* Plain command, no payload - only the target address is passed */
+       OSAL_MEMSET(&nvm_params, 0, sizeof(nvm_params));
+       nvm_params.type = ECORE_MCP_CMD;
+       nvm_params.nvm_common.offset = addr;
+       nvm_params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+
+       rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &nvm_params);
+       p_dev->mcp_nvm_resp = nvm_params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Write 'len' bytes from p_buf to the NVM at 'addr', splitting the
+ * transfer into MCP_DRV_NVM_BUF_LEN-sized mailbox commands.
+ *
+ * rc receives ECORE_INVAL as default parameter because
+ * it might not enter the while loop if the len is 0
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+                                        u32 addr, u8 *p_buf, u32 len)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       enum _ecore_status_t rc = ECORE_INVAL;
+       struct ecore_mcp_nvm_params params;
+       struct ecore_ptt *p_ptt;
+       u32 buf_idx, buf_size;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       params.type = ECORE_MCP_NVM_WR;
+       if (cmd == ECORE_PUT_FILE_DATA)
+               params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+       else
+               params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+       buf_idx = 0;
+       while (buf_idx < len) {
+               buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+                                     MCP_DRV_NVM_BUF_LEN);
+               /* The mailbox 'offset' word packs the chunk length into the
+                * DRV_MB_PARAM_NVM_LEN field alongside the NVM address of
+                * this chunk.
+                */
+               params.nvm_common.offset = ((buf_size <<
+                                            DRV_MB_PARAM_NVM_LEN_SHIFT)
+                                           | addr) + buf_idx;
+               params.nvm_wr.buf_size = buf_size;
+               params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
+               rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+               /* NOTE(review): a failed chunk is only logged and the loop
+                * proceeds to the next chunk - confirm this best-effort
+                * behavior is intentional.
+                */
+               if (rc != ECORE_SUCCESS ||
+                   ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
+                    (params.nvm_common.resp !=
+                     FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
+                       DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+
+               buf_idx += buf_size;
+       }
+
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Write 'len' bytes from p_buf to the PHY via a single MCP mailbox
+ * command. 'cmd' selects between the PHY core write and the raw PHY
+ * write mailbox commands. The raw MFW response is cached in
+ * p_dev->mcp_nvm_resp.
+ * Returns ECORE_BUSY if no PTT window is available, otherwise the
+ * mailbox command status.
+ */
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+                                        u32 addr, u8 *p_buf, u32 len)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params params;
+       struct ecore_ptt *p_ptt;
+       enum _ecore_status_t rc;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       params.type = ECORE_MCP_NVM_WR;
+       params.nvm_wr.buf_size = len;
+       params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
+           DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
+       params.nvm_common.offset = addr;
+       params.nvm_wr.buf = (u32 *)p_buf;
+       rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+       if (rc != ECORE_SUCCESS)
+               DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Send DRV_MSG_CODE_SET_SECURE_MODE to the MFW for the region at 'addr'.
+ * The raw MFW response is cached in p_dev->mcp_nvm_resp.
+ * Returns ECORE_BUSY if no PTT window is available, otherwise the
+ * mailbox command status.
+ */
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+                                                  u32 addr)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params params;
+       struct ecore_ptt *p_ptt;
+       enum _ecore_status_t rc;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       params.type = ECORE_MCP_CMD;
+       params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
+       params.nvm_common.offset = addr;
+       rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Read 'len' bytes from an SFP transceiver EEPROM ('port'/'addr' select
+ * the module and I2C address, 'offset' the byte offset within it) into
+ * p_buf, in MAX_I2C_TRANSACTION_SIZE chunks.
+ * Returns ECORE_NODEV if the transceiver is absent, the mailbox status
+ * on command failure, ECORE_UNKNOWN_ERROR on any other MFW error.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u32 port, u32 addr, u32 offset,
+                                           u32 len, u8 *p_buf)
+{
+       struct ecore_mcp_nvm_params params;
+       enum _ecore_status_t rc;
+       u32 bytes_left, bytes_to_copy, buf_size;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       SET_FIELD(params.nvm_common.offset,
+                 DRV_MB_PARAM_TRANSCEIVER_PORT, port);
+       SET_FIELD(params.nvm_common.offset,
+                 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
+       /* From here on 'addr' holds the transceiver-relative offset and
+        * 'offset' tracks how many bytes were copied into p_buf.
+        */
+       addr = offset;
+       offset = 0;
+       bytes_left = len;
+       params.type = ECORE_MCP_NVM_RD;
+       params.nvm_rd.buf_size = &buf_size;
+       params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
+       while (bytes_left > 0) {
+               bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+                                          MAX_I2C_TRANSACTION_SIZE);
+               params.nvm_rd.buf = (u32 *)(p_buf + offset);
+               SET_FIELD(params.nvm_common.offset,
+                         DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset);
+               SET_FIELD(params.nvm_common.offset,
+                         DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy);
+               rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+               /* Propagate mailbox failures; 'resp' is only meaningful
+                * when the command itself succeeded (was ignored before).
+                */
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+               if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+                   FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+                       return ECORE_NODEV;
+               } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+                          FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+                       return ECORE_UNKNOWN_ERROR;
+
+               offset += *params.nvm_rd.buf_size;
+               bytes_left -= *params.nvm_rd.buf_size;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Write 'len' bytes from p_buf to an SFP transceiver EEPROM
+ * ('port'/'addr' select the module and I2C address, 'offset' the byte
+ * offset within it), in MAX_I2C_TRANSACTION_SIZE chunks.
+ * Returns ECORE_NODEV if the transceiver is absent, the mailbox status
+ * on command failure, ECORE_UNKNOWN_ERROR on any other MFW error.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 port, u32 addr, u32 offset,
+                                            u32 len, u8 *p_buf)
+{
+       struct ecore_mcp_nvm_params params;
+       enum _ecore_status_t rc;
+       u32 buf_idx, buf_size;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       SET_FIELD(params.nvm_common.offset,
+                 DRV_MB_PARAM_TRANSCEIVER_PORT, port);
+       SET_FIELD(params.nvm_common.offset,
+                 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
+       params.type = ECORE_MCP_NVM_WR;
+       params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
+       buf_idx = 0;
+       while (buf_idx < len) {
+               buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+                                     MAX_I2C_TRANSACTION_SIZE);
+               SET_FIELD(params.nvm_common.offset,
+                         DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx);
+               SET_FIELD(params.nvm_common.offset,
+                         DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size);
+               params.nvm_wr.buf_size = buf_size;
+               params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
+               rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+               /* Propagate mailbox failures; 'resp' is only meaningful
+                * when the command itself succeeded (was ignored before).
+                */
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+               if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+                   FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+                       return ECORE_NODEV;
+               } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+                          FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+                       return ECORE_UNKNOWN_ERROR;
+
+               buf_idx += buf_size;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Read the value of GPIO number 'gpio' into *gpio_val via the MFW.
+ * Returns the mailbox status on command failure, ECORE_UNKNOWN_ERROR if
+ * the MFW rejects the GPIO request, ECORE_SUCCESS otherwise.
+ */
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        u16 gpio, u32 *gpio_val)
+{
+       enum _ecore_status_t rc;
+       u32 drv_mb_param = 0, rsp;
+
+       SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
+                          drv_mb_param, &rsp, gpio_val);
+       /* On command failure 'rsp' is not written - bail out before
+        * inspecting it (previously read uninitialized).
+        */
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+               return ECORE_UNKNOWN_ERROR;
+
+       return ECORE_SUCCESS;
+}
+
+/* Drive GPIO number 'gpio' to 'gpio_val' via the MFW.
+ * Returns the mailbox status on command failure, ECORE_UNKNOWN_ERROR if
+ * the MFW rejects the GPIO request, ECORE_SUCCESS otherwise.
+ */
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u16 gpio, u16 gpio_val)
+{
+       enum _ecore_status_t rc;
+       u32 drv_mb_param = 0, param, rsp;
+
+       SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
+       SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val);
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
+                          drv_mb_param, &rsp, &param);
+       /* On command failure 'rsp' is not written - bail out before
+        * inspecting it (previously read uninitialized).
+        */
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+               return ECORE_UNKNOWN_ERROR;
+
+       return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
new file mode 100644 (file)
index 0000000..448c30b
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_MCP_H__
+#define __ECORE_MCP_H__
+
+#include "bcm_osal.h"
+#include "mcp_public.h"
+#include "ecore_mcp_api.h"
+
+/* Using hwfn number (and not pf_num) is required since in CMT mode,
+ * same pf_num may be used by two different hwfn
+ * TODO - this shouldn't really be in .h file, but until all fields
+ * required during hw-init will be placed in their correct place in shmem
+ * we need it in ecore_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \
+                                           ((rel_pfid) | \
+                                            ((p_hwfn)->abs_pf_id & 1) << 3) : \
+                                            rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn)      ((_p_hwfn)->abs_pf_id % \
+                                ((_p_hwfn)->p_dev->num_ports_in_engines * 2))
+/* Per-hwfn bookkeeping for the driver<->MCP mailbox interface */
+struct ecore_mcp_info {
+       osal_spinlock_t lock;   /* Spinlock used for accessing MCP mailbox */
+       u32 public_base;        /* Address of the MCP public area */
+       u32 drv_mb_addr;        /* Address of the driver mailbox */
+       u32 mfw_mb_addr;        /* Address of the MFW mailbox */
+       u32 port_addr;          /* Address of the port configuration (link) */
+       u16 drv_mb_seq;         /* Current driver mailbox sequence */
+       u16 drv_pulse_seq;      /* Current driver pulse sequence */
+       struct ecore_mcp_link_params link_input;
+       struct ecore_mcp_link_state link_output;
+       struct ecore_mcp_link_capabilities link_capabilities;
+       struct ecore_mcp_function_info func_info;
+
+       u8 *mfw_mb_cur;         /* Current copy of the MFW mailbox
+                                * (filled by ecore_mcp_read_mb)
+                                */
+       u8 *mfw_mb_shadow;      /* presumably the previous mailbox copy, used
+                                * to detect new MFW events - TODO confirm
+                                */
+       u16 mfw_mb_length;
+       u16 mcp_hist;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt);
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event
+ * detected, it will be handled here, otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt);
+
+/**
+ * @brief When MFW doesn't get driver pulse for couple of seconds, at some
+ * threshold before timeout expires, it will generate interrupt
+ * through a dedicated status block (DPSB - Driver Pulse Status
+ * Block), which the driver should respond immediately, by
+ * providing keepalive indication after setting the PTT to the
+ * driver-MFW mailbox. This function is called directly from the
+ * DPC upon receiving the DPSB attention.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW, and in case operation
+ *        succeed, returns whether this PF is the first on the
+ *        chip/engine/port or function. This function should be
+ *        called when driver is ready to accept MFW events after
+ *        Storms initializations are done.
+ *
+ * @param p_hwfn       - hw function
+ * @param p_ptt        - PTT required for register access
+ * @param p_load_code  - The MCP response param containing one
+ *      of the following:
+ *      FW_MSG_CODE_DRV_LOAD_ENGINE
+ *      FW_MSG_CODE_DRV_LOAD_PORT
+ *      FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - Operation was successful.
+ *      ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       u32 *p_load_code);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @param return enum _ecore_status_t - ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 *vfs_to_ack);
+
+/**
+ * @brief - calls during init to read shmem of all function-related info.
+ *
+ * @param p_hwfn
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Sets the union data in the MCP mailbox and sends a mailbox command.
+ *
+ * @param p_hwfn       - hw function
+ * @param p_ptt        - PTT required for register access
+ * @param cmd          - command to be sent to the MCP
+ * @param param        - optional param
+ * @param p_union_data - pointer to a drv_union_data
+ * @param o_mcp_resp   - the MCP response code (exclude sequence)
+ * @param o_mcp_param  - optional parameter provided by the MCP response
+ *
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - operation was successful
+ *      ECORE_BUSY    - operation failed
+ */
+enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 cmd, u32 param,
+                                            union drv_union_data *p_union_data,
+                                            u32 *o_mcp_resp,
+                                            u32 *o_mcp_param);
+
+/**
+ * @brief - Sends an NVM write command request to the MFW with
+ *          payload.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
+ *            DRV_MSG_CODE_NVM_PUT_FILE_DATA
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param i_txn_size -  Buffer size
+ * @param i_buf - Pointer to the buffer
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 cmd,
+                                         u32 param,
+                                         u32 *o_mcp_resp,
+                                         u32 *o_mcp_param,
+                                         u32 i_txn_size, u32 *i_buf);
+
+/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ *        a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ *            DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size -  Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 cmd,
+                                         u32 param,
+                                         u32 *o_mcp_resp,
+                                         u32 *o_mcp_param,
+                                         u32 *o_txn_size, u32 *o_buf);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute inside engine
+ * @param num_sbs - number of entries to request
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u8 vf_id, u8 num);
+
+/**
+ * @brief - Halt the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Wake up the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt);
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_mcp_link_state *p_link,
+                                      u8 max_bw);
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_mcp_link_state *p_link,
+                                      u8 min_bw);
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 mask_parities);
+#endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
new file mode 100644 (file)
index 0000000..7360b35
--- /dev/null
@@ -0,0 +1,611 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_MCP_API_H__
+#define __ECORE_MCP_API_H__
+
+#include "ecore_status.h"
+
+/* Requested speed configuration for the link */
+struct ecore_mcp_link_speed_params {
+       bool autoneg;
+       u32 advertised_speeds;  /* bitmask of DRV_SPEED_CAPABILITY */
+       u32 forced_speed;       /* In Mb/s */
+};
+
+/* Requested flow-control (pause) configuration for the link */
+struct ecore_mcp_link_pause_params {
+       bool autoneg;
+       bool forced_rx;
+       bool forced_tx;
+};
+
+/* Aggregate link configuration requested from the MFW */
+struct ecore_mcp_link_params {
+       struct ecore_mcp_link_speed_params speed;
+       struct ecore_mcp_link_pause_params pause;
+       u32 loopback_mode;      /* in PMM_LOOPBACK values */
+};
+
+/* Speeds the port hardware is capable of */
+struct ecore_mcp_link_capabilities {
+       u32 speed_capabilities;
+};
+
+/* Current link status as reported by the MFW */
+struct ecore_mcp_link_state {
+       bool link_up;
+
+       u32 line_speed;         /* In Mb/s */
+       u32 min_pf_rate;        /* In Mb/s */
+       u32 speed;              /* In Mb/s */
+       bool full_duplex;
+
+       bool an;                /* auto-negotiation enabled */
+       bool an_complete;
+       bool parallel_detection;
+       bool pfc_enabled;
+
+/* bit values for partner_adv_speed below */
+#define ECORE_LINK_PARTNER_SPEED_1G_HD (1 << 0)
+#define ECORE_LINK_PARTNER_SPEED_1G_FD (1 << 1)
+#define ECORE_LINK_PARTNER_SPEED_10G   (1 << 2)
+#define ECORE_LINK_PARTNER_SPEED_20G   (1 << 3)
+#define ECORE_LINK_PARTNER_SPEED_25G   (1 << 4)
+#define ECORE_LINK_PARTNER_SPEED_40G   (1 << 5)
+#define ECORE_LINK_PARTNER_SPEED_50G   (1 << 6)
+#define ECORE_LINK_PARTNER_SPEED_100G  (1 << 7)
+       u32 partner_adv_speed;
+
+       bool partner_tx_flow_ctrl_en;
+       bool partner_rx_flow_ctrl_en;
+
+/* values for partner_adv_pause below */
+#define ECORE_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define ECORE_LINK_PARTNER_BOTH_PAUSE (3)
+       u8 partner_adv_pause;
+
+       bool sfp_tx_fault;
+};
+
+/* Per-function configuration read from the MFW shared memory */
+struct ecore_mcp_function_info {
+       u8 pause_on_host;
+
+       enum ecore_pci_personality protocol;
+
+       u8 bandwidth_min;
+       u8 bandwidth_max;
+
+       u8 mac[ETH_ALEN];
+
+       u64 wwn_port;
+       u64 wwn_node;
+
+/* sentinel meaning no outer vlan is configured for this function */
+#define ECORE_MCP_VLAN_UNSET           (0xffff)
+       u16 ovlan;
+};
+
+/* Fields common to every NVM mailbox request; 'resp' is filled by the MFW */
+struct ecore_mcp_nvm_common {
+       u32 offset;
+       u32 param;
+       u32 resp;
+       u32 cmd;
+};
+
+/* Read request: 'buf_size' is an in/out pointer - the MFW writes back
+ * the number of bytes actually read.
+ */
+struct ecore_mcp_nvm_rd {
+       u32 *buf_size;
+       u32 *buf;
+};
+
+/* Write request: 'buf_size' bytes are taken from 'buf' */
+struct ecore_mcp_nvm_wr {
+       u32 buf_size;
+       u32 *buf;
+};
+
+/* Parameter bundle for ecore_mcp_nvm_command(); 'type' selects which
+ * union member (if any) is valid.
+ */
+struct ecore_mcp_nvm_params {
+#define ECORE_MCP_CMD          (1 << 0)
+#define ECORE_MCP_NVM_RD       (1 << 1)
+#define ECORE_MCP_NVM_WR       (1 << 2)
+       u8 type;
+
+       struct ecore_mcp_nvm_common nvm_common;
+
+       union {
+               struct ecore_mcp_nvm_rd nvm_rd;
+               struct ecore_mcp_nvm_wr nvm_wr;
+       };
+};
+
+/* Driver version information sent to the MFW */
+struct ecore_mcp_drv_version {
+       u32 version;
+       u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/* LAN statistics reported through the MFW protocol-stats interface */
+struct ecore_mcp_lan_stats {
+       u64 ucast_rx_pkts;
+       u64 ucast_tx_pkts;
+       u32 fcs_err;
+};
+
+#ifndef ECORE_PROTO_STATS
+#define ECORE_PROTO_STATS
+
+enum ecore_mcp_protocol_type {
+       ECORE_MCP_LAN_STATS,
+};
+
+union ecore_mcp_protocol_stats {
+       struct ecore_mcp_lan_stats lan_stats;
+};
+#endif
+
+/* Which base-device property changed, for
+ * ecore_mcp_ov_update_current_config()
+ */
+enum ecore_ov_config_method {
+       ECORE_OV_CONFIG_MTU,
+       ECORE_OV_CONFIG_MAC,
+       ECORE_OV_CONFIG_WOL
+};
+
+/* Originator of an OV configuration change */
+enum ecore_ov_client {
+       ECORE_OV_CLIENT_DRV,
+       ECORE_OV_CLIENT_USER
+};
+
+/* Driver state reported to the MFW */
+enum ecore_ov_driver_state {
+       ECORE_OV_DRIVER_STATE_NOT_LOADED,
+       ECORE_OV_DRIVER_STATE_DISABLED,
+       ECORE_OV_DRIVER_STATE_ACTIVE
+};
+
+#define ECORE_MAX_NPIV_ENTRIES 128
+#define ECORE_WWN_SIZE 8
+/* FC NPIV table read from the MFW; 'count' is the number of valid
+ * entries (0 means the table was not populated).
+ */
+struct ecore_fc_npiv_tbl {
+       u32 count;
+       u8 wwpn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+       u8 wwnn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+};
+
+#ifndef __EXTRACT__LINUX__
+/* LED control modes; RESTORE returns control to the MFW default */
+enum ecore_led_mode {
+       ECORE_LED_MODE_OFF,
+       ECORE_LED_MODE_ON,
+       ECORE_LED_MODE_RESTORE
+};
+#endif
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, bool b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param p_dev       - ecore dev pointer
+ * @param p_ptt
+ * @param p_mfw_ver    - mfw version value
+ * @param p_running_bundle_id  - image id in nvram; Optional.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
+                                          struct ecore_ptt *p_ptt,
+                                          u32 *p_mfw_ver,
+                                          u32 *p_running_bundle_id);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param p_dev      - ecore dev pointer
+ * @param media_type - media type value
+ *
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - Operation was successful.
+ *      ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+                                             u32 *media_type);
+
+/**
+ * @brief - Sends a command to the MCP mailbox.
+ *
+ * @param p_hwfn      - hw function
+ * @param p_ptt       - PTT required for register access
+ * @param cmd         - command to be sent to the MCP
+ * @param param       - optional param
+ * @param o_mcp_resp  - the MCP response code (exclude sequence)
+ * @param o_mcp_param - optional parameter provided by the MCP response
+ *
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - operation was successful
+ *      ECORE_BUSY    - operation failed
+ */
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+                                  u32 *o_mcp_resp, u32 *o_mcp_param);
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ *          (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - return the mcp function info of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to mcp function info
+ */
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief - Function for reading/manipulating the nvram. Following are supported
+ *          functionalities.
+ *          1. Read: Read the specified nvram offset.
+ *             input values:
+ *               type   - ECORE_MCP_NVM_RD
+ *               cmd    - command code (e.g. DRV_MSG_CODE_NVM_READ_NVRAM)
+ *               offset - nvm offset
+ *
+ *             output values:
+ *               buf      - buffer
+ *               buf_size - buffer size
+ *
+ *          2. Write: Write the data at the specified nvram offset
+ *             input values:
+ *               type     - ECORE_MCP_NVM_WR
+ *               cmd      - command code (e.g. DRV_MSG_CODE_NVM_WRITE_NVRAM)
+ *               offset   - nvm offset
+ *               buf      - buffer
+ *               buf_size - buffer size
+ *
+ *          3. Command: Send the NVM command to MCP.
+ *             input values:
+ *               type   - ECORE_MCP_CMD
+ *               cmd    - command code (e.g. DRV_MSG_CODE_NVM_DEL_FILE)
+ *               offset - nvm offset
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param params
+ *
+ * @return ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_ptt *p_ptt,
+                                          struct ecore_mcp_nvm_params *params);
+
+/**
+ * @brief - count number of function with a matching personality on engine.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param personalities - a bitmask of ecore_pci_personality values
+ *
+ * @returns the count of all devices on engine whose personality match one of
+ *          the bitmasks.
+ */
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_ptt *p_ptt, u32 personalities);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size  - flash size in bytes to be filled.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param version - Version value
+ * @param name - Protocol driver name
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                          struct ecore_mcp_drv_version *p_ver);
+
+/**
+ * @brief Read the MFW process kill counter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Trigger a recovery process
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Notify MFW about the change in base device properties
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param config - Configuration that has been updated
+ *  @param client - ecore client type
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  enum ecore_ov_config_method config,
+                                  enum ecore_ov_client client);
+
+/**
+ * @brief Notify MFW about the driver state
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param drv_state - Driver state
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt,
+                                enum ecore_ov_driver_state drv_state);
+
+/**
+ * @brief Read NPIV settings from the MFW
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param p_table - Array to hold the FC NPIV data. Client need allocate the
+ *                   required buffer. The field 'count' specifies number of NPIV
+ *                   entries. A value of 0 means the table was not populated.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                        struct ecore_fc_npiv_tbl *p_table);
+
+/**
+ * @brief Send MTU size to MFW
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param mtu - MTU size
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt, u16 mtu);
+
+/**
+ * @brief Set LED status
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param mode - LED mode
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      enum ecore_led_mode mode);
+
+/**
+ * @brief Set secure mode
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+                                                  u32 addr);
+
+/**
+ * @brief Write to phy
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *  @param cmd - nvm command
+ *  @param p_buf - nvm write buffer
+ *  @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+                                        u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Write to nvm
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *  @param cmd - nvm command
+ *  @param p_buf - nvm write buffer
+ *  @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+                                        u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Put file begin
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+                                                 u32 addr);
+
+/**
+ * @brief Delete file
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr);
+
+/**
+ * @brief Check latest response
+ *
+ *  @param p_dev
+ *  @param p_buf - buffer to hold the latest nvm response
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
+
+/**
+ * @brief Read from phy
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *  @param cmd - nvm command
+ *  @param p_buf - nvm read buffer
+ *  @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+                                       u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Read from nvm
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *  @param p_buf - nvm read buffer
+ *  @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+                                       u8 *p_buf, u32 len);
+
+/**
+ * @brief Read from sfp
+ *
+ *  @param p_hwfn - hw function
+ *  @param p_ptt  - PTT required for register access
+ *  @param port   - transceiver port
+ *  @param addr   - I2C address
+ *  @param offset - offset in sfp
+ *  @param len    - buffer length
+ *  @param p_buf  - buffer to read into
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u32 port, u32 addr, u32 offset,
+                                           u32 len, u8 *p_buf);
+
+/**
+ * @brief Write to sfp
+ *
+ *  @param p_hwfn - hw function
+ *  @param p_ptt  - PTT required for register access
+ *  @param port   - transceiver port
+ *  @param addr   - I2C address
+ *  @param offset - offset in sfp
+ *  @param len    - buffer length
+ *  @param p_buf  - buffer to write from
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 port, u32 addr, u32 offset,
+                                            u32 len, u8 *p_buf);
+
+/**
+ * @brief Gpio read
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *  @param gpio      - gpio number
+ *  @param gpio_val  - value read from gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        u16 gpio, u32 *gpio_val);
+
+/**
+ * @brief Gpio write
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *  @param gpio      - gpio number
+ *  @param gpio_val  - value to write to gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u16 gpio, u16 gpio_val);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
new file mode 100644 (file)
index 0000000..2fecbc8
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_PROTO_IF_H__
+#define __ECORE_PROTO_IF_H__
+
+/*
+ * PF parameters (according to personality/protocol)
+ */
+
+struct ecore_eth_pf_params {
+       /* The following parameters are used during HW-init
+        * and these parameters need to be passed as arguments
+        * to update_pf_params routine invoked before slowpath start
+        */
+       u16 num_cons;
+};
+
+struct ecore_pf_params {
+       struct ecore_eth_pf_params eth_pf_params;
+};
+
+#endif
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
new file mode 100644 (file)
index 0000000..1f5139e
--- /dev/null
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __RT_DEFS_H__
+#define __RT_DEFS_H__
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET       0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET       1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET       2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET       3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET       4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET       5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET       6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET       7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET       8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET       9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET       10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET       11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET       12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET       13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET       14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET       15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET         16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET              17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET             18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET             19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET              20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET              21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET           22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET          23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET            24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE          736
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE          736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET       1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE         736
+#define CAU_REG_PI_MEMORY_RT_OFFSET            2233
+#define CAU_REG_PI_MEMORY_RT_SIZE              4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET           6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET             6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET             6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET        6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET        6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET           6654
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET              6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET            6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET          6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET             6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET              6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET        6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET            6665
+#define SRC_REG_FIRSTFREE_RT_SIZE              2
+#define SRC_REG_LASTFREE_RT_OFFSET             6667
+#define SRC_REG_LASTFREE_RT_SIZE               2
+#define SRC_REG_COUNTFREE_RT_OFFSET            6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET             6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET       6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET       6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET         6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET         6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET                6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET       6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET              6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET       6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET              6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET       6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET             6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET              6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET            6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET             6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET            6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET             6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET            6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET             6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET            6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET              6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET            6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET            6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET          6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET        6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET        6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET           6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET       6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET           6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET           6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET             6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET             6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET                6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE          22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET          28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET          28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET             28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET             28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET             28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET                28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET                28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET                28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET            28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET            28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET       28714
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE         416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET       29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE         512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET           29642
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET           29643
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET           29644
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET              29645
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET              29646
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET              29647
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET              29648
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET              29649
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET              29650
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET              29651
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET              29652
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET              29653
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET              29654
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET             29655
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET             29656
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET             29657
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET             29658
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET             29659
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET             29660
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET             29661
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET             29662
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET             29663
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET             29664
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET             29665
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET             29666
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET             29667
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET             29668
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET             29669
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET             29670
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET             29671
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET             29672
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET             29673
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET             29674
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET             29675
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET             29676
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET             29677
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET             29678
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET             29679
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET             29680
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET             29681
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET             29682
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET             29683
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET             29684
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET             29685
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET             29686
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET             29687
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET             29688
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET             29689
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET             29690
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET             29691
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET             29692
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET             29693
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET             29694
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET             29695
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET             29696
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET             29697
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET             29698
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET             29699
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET             29700
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET             29701
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET             29702
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET             29703
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET             29704
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET             29705
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET             29706
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET             29707
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET             29708
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET       29709
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE         128
+#define QM_REG_VOQCRDLINE_RT_OFFSET            29837
+#define QM_REG_VOQCRDLINE_RT_SIZE              20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET                29857
+#define QM_REG_VOQINITCRDLINE_RT_SIZE          20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET            29877
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET            29878
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET             29879
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET           29880
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET          29881
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET       29882
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET       29883
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET       29884
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET       29885
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET       29886
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET       29887
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET       29888
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET       29889
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET       29890
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET       29891
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET              29892
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET              29893
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET              29894
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET              29895
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET              29896
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET              29897
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET           29898
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET           29899
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET           29900
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET           29901
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET              29902
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET              29903
+#define QM_REG_PQTX2PF_0_RT_OFFSET             29904
+#define QM_REG_PQTX2PF_1_RT_OFFSET             29905
+#define QM_REG_PQTX2PF_2_RT_OFFSET             29906
+#define QM_REG_PQTX2PF_3_RT_OFFSET             29907
+#define QM_REG_PQTX2PF_4_RT_OFFSET             29908
+#define QM_REG_PQTX2PF_5_RT_OFFSET             29909
+#define QM_REG_PQTX2PF_6_RT_OFFSET             29910
+#define QM_REG_PQTX2PF_7_RT_OFFSET             29911
+#define QM_REG_PQTX2PF_8_RT_OFFSET             29912
+#define QM_REG_PQTX2PF_9_RT_OFFSET             29913
+#define QM_REG_PQTX2PF_10_RT_OFFSET            29914
+#define QM_REG_PQTX2PF_11_RT_OFFSET            29915
+#define QM_REG_PQTX2PF_12_RT_OFFSET            29916
+#define QM_REG_PQTX2PF_13_RT_OFFSET            29917
+#define QM_REG_PQTX2PF_14_RT_OFFSET            29918
+#define QM_REG_PQTX2PF_15_RT_OFFSET            29919
+#define QM_REG_PQTX2PF_16_RT_OFFSET            29920
+#define QM_REG_PQTX2PF_17_RT_OFFSET            29921
+#define QM_REG_PQTX2PF_18_RT_OFFSET            29922
+#define QM_REG_PQTX2PF_19_RT_OFFSET            29923
+#define QM_REG_PQTX2PF_20_RT_OFFSET            29924
+#define QM_REG_PQTX2PF_21_RT_OFFSET            29925
+#define QM_REG_PQTX2PF_22_RT_OFFSET            29926
+#define QM_REG_PQTX2PF_23_RT_OFFSET            29927
+#define QM_REG_PQTX2PF_24_RT_OFFSET            29928
+#define QM_REG_PQTX2PF_25_RT_OFFSET            29929
+#define QM_REG_PQTX2PF_26_RT_OFFSET            29930
+#define QM_REG_PQTX2PF_27_RT_OFFSET            29931
+#define QM_REG_PQTX2PF_28_RT_OFFSET            29932
+#define QM_REG_PQTX2PF_29_RT_OFFSET            29933
+#define QM_REG_PQTX2PF_30_RT_OFFSET            29934
+#define QM_REG_PQTX2PF_31_RT_OFFSET            29935
+#define QM_REG_PQTX2PF_32_RT_OFFSET            29936
+#define QM_REG_PQTX2PF_33_RT_OFFSET            29937
+#define QM_REG_PQTX2PF_34_RT_OFFSET            29938
+#define QM_REG_PQTX2PF_35_RT_OFFSET            29939
+#define QM_REG_PQTX2PF_36_RT_OFFSET            29940
+#define QM_REG_PQTX2PF_37_RT_OFFSET            29941
+#define QM_REG_PQTX2PF_38_RT_OFFSET            29942
+#define QM_REG_PQTX2PF_39_RT_OFFSET            29943
+#define QM_REG_PQTX2PF_40_RT_OFFSET            29944
+#define QM_REG_PQTX2PF_41_RT_OFFSET            29945
+#define QM_REG_PQTX2PF_42_RT_OFFSET            29946
+#define QM_REG_PQTX2PF_43_RT_OFFSET            29947
+#define QM_REG_PQTX2PF_44_RT_OFFSET            29948
+#define QM_REG_PQTX2PF_45_RT_OFFSET            29949
+#define QM_REG_PQTX2PF_46_RT_OFFSET            29950
+#define QM_REG_PQTX2PF_47_RT_OFFSET            29951
+#define QM_REG_PQTX2PF_48_RT_OFFSET            29952
+#define QM_REG_PQTX2PF_49_RT_OFFSET            29953
+#define QM_REG_PQTX2PF_50_RT_OFFSET            29954
+#define QM_REG_PQTX2PF_51_RT_OFFSET            29955
+#define QM_REG_PQTX2PF_52_RT_OFFSET            29956
+#define QM_REG_PQTX2PF_53_RT_OFFSET            29957
+#define QM_REG_PQTX2PF_54_RT_OFFSET            29958
+#define QM_REG_PQTX2PF_55_RT_OFFSET            29959
+#define QM_REG_PQTX2PF_56_RT_OFFSET            29960
+#define QM_REG_PQTX2PF_57_RT_OFFSET            29961
+#define QM_REG_PQTX2PF_58_RT_OFFSET            29962
+#define QM_REG_PQTX2PF_59_RT_OFFSET            29963
+#define QM_REG_PQTX2PF_60_RT_OFFSET            29964
+#define QM_REG_PQTX2PF_61_RT_OFFSET            29965
+#define QM_REG_PQTX2PF_62_RT_OFFSET            29966
+#define QM_REG_PQTX2PF_63_RT_OFFSET            29967
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET          29968
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET          29969
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET          29970
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET          29971
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET          29972
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET          29973
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET          29974
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET          29975
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET          29976
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET          29977
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET         29978
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET         29979
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET         29980
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET         29981
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET         29982
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET         29983
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET                29984
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET                29985
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET           29986
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET           29987
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET             29988
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET             29989
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET             29990
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET             29991
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET             29992
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET             29993
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET             29994
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET             29995
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET          29996
+#define QM_REG_RLGLBLINCVAL_RT_SIZE            256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET              30252
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE                256
+#define QM_REG_RLGLBLCRD_RT_OFFSET             30508
+#define QM_REG_RLGLBLCRD_RT_SIZE               256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET          30764
+#define QM_REG_RLPFPERIOD_RT_OFFSET            30765
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET       30766
+#define QM_REG_RLPFINCVAL_RT_OFFSET            30767
+#define QM_REG_RLPFINCVAL_RT_SIZE              16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET                30783
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE          16
+#define QM_REG_RLPFCRD_RT_OFFSET               30799
+#define QM_REG_RLPFCRD_RT_SIZE                 16
+#define QM_REG_RLPFENABLE_RT_OFFSET            30815
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET         30816
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET           30817
+#define QM_REG_WFQPFWEIGHT_RT_SIZE             16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET       30833
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE         16
+#define QM_REG_WFQPFCRD_RT_OFFSET              30849
+#define QM_REG_WFQPFCRD_RT_SIZE                        160
+#define QM_REG_WFQPFENABLE_RT_OFFSET           31009
+#define QM_REG_WFQVPENABLE_RT_OFFSET           31010
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET          31011
+#define QM_REG_BASEADDRTXPQ_RT_SIZE            512
+#define QM_REG_TXPQMAP_RT_OFFSET               31523
+#define QM_REG_TXPQMAP_RT_SIZE                 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET           32035
+#define QM_REG_WFQVPWEIGHT_RT_SIZE             512
+#define QM_REG_WFQVPCRD_RT_OFFSET              32547
+#define QM_REG_WFQVPCRD_RT_SIZE                        512
+#define QM_REG_WFQVPMAP_RT_OFFSET              33059
+#define QM_REG_WFQVPMAP_RT_SIZE                        512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET          33571
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE            160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET              33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET        33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET        33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET        33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET        33735
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET         33736
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET             33737
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET              33738
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE                4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET         33742
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE           4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET           33746
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE             4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET              33750
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET        33751
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE          32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET           33783
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE             16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET         33799
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE           16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET                33815
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE  16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET              33831
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE        16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET         33847
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET              33848
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET              33849
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET              33850
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET          33851
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET          33852
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET          33853
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET          33854
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET               33855
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET               33856
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET               33857
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET               33858
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET           33859
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET        33860
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET              33861
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET         33862
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET               33863
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET          33864
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET           33865
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET               33866
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET          33867
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET           33868
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET               33869
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET          33870
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET           33871
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET               33872
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET          33873
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET           33874
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET               33875
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET          33876
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET           33877
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET               33878
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET          33879
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET           33880
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET               33881
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET          33882
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET           33883
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET               33884
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET          33885
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET           33886
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET               33887
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET          33888
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET           33889
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET               33890
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET          33891
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET           33892
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET              33893
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET         33894
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET  33895
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET              33896
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET         33897
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET  33898
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET              33899
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET         33900
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET  33901
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET              33902
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET         33903
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET  33904
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET              33905
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET         33906
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET  33907
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET              33908
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET         33909
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET  33910
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET              33911
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET         33912
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET  33913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET              33914
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET         33915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET  33916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET              33917
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET         33918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET  33919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET              33920
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET         33921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET  33922
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET           33923
+
+#define RUNTIME_ARRAY_SIZE 33924
+
+#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
new file mode 100644 (file)
index 0000000..e80f5ef
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SP_API_H__
+#define __ECORE_SP_API_H__
+
+#include "ecore_status.h"
+
/* Modes by which a slowpath ramrod's completion may be awaited. */
enum spq_mode {
	ECORE_SPQ_MODE_BLOCK,	/* Client will poll a designated mem. address */
	ECORE_SPQ_MODE_CB,	/* Client supplies a callback */
	ECORE_SPQ_MODE_EBLOCK,	/* ECORE should block until completion */
};

/* Forward declarations - full definitions live in other ecore/HSI headers */
struct ecore_hwfn;
union event_ring_data;
struct eth_slow_path_rx_cqe;

/* Completion descriptor attached to a slowpath queue entry; 'function'
 * is invoked upon EQ completion and receives the opaque 'cookie'.
 */
struct ecore_spq_comp_cb {
	void (*function)(struct ecore_hwfn *,
			 void *, union event_ring_data *, u8 fw_return_code);
	void *cookie;
};
+
+/**
+ * @brief ecore_eth_cqe_completion - handles the completion of a
+ *        ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+                                             struct eth_slow_path_rx_cqe *cqe);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
new file mode 100644 (file)
index 0000000..7c6e254
--- /dev/null
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_spq_entry **pp_ent,
+                                          u8 cmd,
+                                          u8 protocol,
+                                          struct ecore_sp_init_data *p_data)
+{
+       u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+       /* Get an SPQ entry */
+       rc = ecore_spq_get_entry(p_hwfn, pp_ent);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Fill the SPQ entry */
+       p_ent = *pp_ent;
+       p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
+       p_ent->elem.hdr.cmd_id = cmd;
+       p_ent->elem.hdr.protocol_id = protocol;
+       p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
+       p_ent->comp_mode = p_data->comp_mode;
+       p_ent->comp_done.done = 0;
+
+       switch (p_ent->comp_mode) {
+       case ECORE_SPQ_MODE_EBLOCK:
+               p_ent->comp_cb.cookie = &p_ent->comp_done;
+               break;
+
+       case ECORE_SPQ_MODE_BLOCK:
+               if (!p_data->p_comp_data)
+                       return ECORE_INVAL;
+
+               p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
+               break;
+
+       case ECORE_SPQ_MODE_CB:
+               if (!p_data->p_comp_data)
+                       p_ent->comp_cb.function = OSAL_NULL;
+               else
+                       p_ent->comp_cb = *p_data->p_comp_data;
+               break;
+
+       default:
+               DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+                         p_ent->comp_mode);
+               return ECORE_INVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                  "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+                  opaque_cid, cmd, protocol,
+                  (unsigned long)&p_ent->ramrod,
+                  D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+                          ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+                          "MODE_CB"));
+
+       OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
+
+       return ECORE_SUCCESS;
+}
+
+static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+{
+       switch (type) {
+       case ECORE_TUNN_CLSS_MAC_VLAN:
+               return TUNNEL_CLSS_MAC_VLAN;
+       case ECORE_TUNN_CLSS_MAC_VNI:
+               return TUNNEL_CLSS_MAC_VNI;
+       case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
+               return TUNNEL_CLSS_INNER_MAC_VLAN;
+       case ECORE_TUNN_CLSS_INNER_MAC_VNI:
+               return TUNNEL_CLSS_INNER_MAC_VNI;
+       default:
+               return TUNNEL_CLSS_MAC_VLAN;
+       }
+}
+
+static void
+ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
+                               struct ecore_tunn_update_params *p_src,
+                               struct pf_update_tunnel_config *p_tunn_cfg)
+{
+       unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
+       unsigned long update_mask = p_src->tunn_mode_update_mask;
+       unsigned long tunn_mode = p_src->tunn_mode;
+       unsigned long new_tunn_mode = 0;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
+       }
+
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               if (p_src->update_geneve_udp_port)
+                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
+               p_src->update_geneve_udp_port = 0;
+               p_src->tunn_mode = new_tunn_mode;
+               return;
+       }
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+       }
+
+       p_src->tunn_mode = new_tunn_mode;
+}
+
/* Translate the tunnel-update request 'p_src' into the pf_update ramrod's
 * tunnel configuration: per-type classification, UDP ports and TX-enable
 * bits. Geneve fields are skipped on BB A0, which lacks geneve support.
 */
static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
				struct ecore_tunn_update_params *p_src,
				struct pf_update_tunnel_config *p_tunn_cfg)
{
	/* NOTE(review): tunn_mode is sampled BEFORE the fix-up below, so
	 * the tx_enable bits reflect the requested mode while the fixed
	 * mode is written back into p_src->tunn_mode for the caller's HW
	 * programming - confirm this is the intended behavior.
	 */
	unsigned long tunn_mode = p_src->tunn_mode;
	enum tunnel_clss type;

	/* Merge the request with the cached device tunnel mode; may clear
	 * update_geneve_udp_port on A0 hardware.
	 */
	ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
	p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
	p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;

	/* Classification type per tunnel protocol */
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
	p_tunn_cfg->tunnel_clss_vxlan = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
	p_tunn_cfg->tunnel_clss_l2gre = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
	p_tunn_cfg->tunnel_clss_ipgre = type;

	if (p_src->update_vxlan_udp_port) {
		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
		p_tunn_cfg->vxlan_udp_port =
		    OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2gre = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgre = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_vxlan = 1;

	/* BB A0 has no geneve support - drop any geneve port update */
	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		if (p_src->update_geneve_udp_port)
			DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
		p_src->update_geneve_udp_port = 0;
		return;
	}

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
		    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2geneve = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgeneve = 1;

	type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
	p_tunn_cfg->tunnel_clss_l2geneve = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
	p_tunn_cfg->tunnel_clss_ipgeneve = type;
}
+
+static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  unsigned long tunn_mode)
+{
+       u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+       u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+               l2gre_enable = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+               ipgre_enable = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+               vxlan_enable = 1;
+
+       ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+       ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+               return;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+               l2geneve_enable = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+               ipgeneve_enable = 1;
+
+       ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+                               ipgeneve_enable);
+}
+
/* Fill the pf_start ramrod's tunnel configuration from the initial
 * tunneling parameters: classification per tunnel type, UDP ports and
 * TX-enable bits. No-op when p_src is OSAL_NULL; geneve handling is
 * skipped on BB A0 (no geneve support).
 */
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
			       struct ecore_tunn_start_params *p_src,
			       struct pf_start_tunnel_config *p_tunn_cfg)
{
	unsigned long tunn_mode;
	enum tunnel_clss type;

	if (!p_src)
		return;

	tunn_mode = p_src->tunn_mode;
	/* Classification type per tunnel protocol */
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
	p_tunn_cfg->tunnel_clss_vxlan = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
	p_tunn_cfg->tunnel_clss_l2gre = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
	p_tunn_cfg->tunnel_clss_ipgre = type;

	if (p_src->update_vxlan_udp_port) {
		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
		p_tunn_cfg->vxlan_udp_port =
		    OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2gre = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgre = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_vxlan = 1;

	/* BB A0 has no geneve support - drop any geneve port update */
	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		if (p_src->update_geneve_udp_port)
			DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
		p_src->update_geneve_udp_port = 0;
		return;
	}

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
		    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2geneve = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgeneve = 1;

	type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
	p_tunn_cfg->tunnel_clss_l2geneve = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
	p_tunn_cfg->tunnel_clss_ipgeneve = type;
}
+
/* Send the COMMON_RAMROD_PF_START ramrod in EBLOCK mode: updates the
 * initial EQ producer, fills the pf_start ramrod data (event ring and
 * consolidation queue PBLs, MF mode, tunnel and SR-IOV parameters),
 * posts it, and finally programs the HW tunnel parsing mode.
 */
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
				       struct ecore_tunn_start_params *p_tunn,
				       enum ecore_mf_mode mode,
				       bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct ecore_sp_init_data init_data;
	u8 page_cnt;

	/* update initial eq producer */
	ecore_eq_prod_update(p_hwfn,
			     ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	/* Initialize the SPQ entry for the ramrod */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the ramrod data */
	p_ramrod = &p_ent->ramrod.pf_start;
	p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* For easier debugging */
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);

	/* Translate the requested multi-function mode to the HSI value;
	 * unknown modes fall back to NPAR with a notice.
	 */
	switch (mode) {
	case ECORE_MF_DEFAULT:
	case ECORE_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case ECORE_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl.p_phys_table);
	page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl.p_phys_table);

	ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
				       &p_ramrod->tunnel_config);

	/* npar tx switching is only honored in switch-independent mode */
	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	/* Only the ETH personality is supported here; anything else is
	 * reported and treated as ETH.
	 */
	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	p_ramrod->base_vf_id = (u8)p_hwfn->hw_info.first_vf_in_pf;
	p_ramrod->num_vfs = (u8)p_hwfn->p_dev->sriov_info.total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	/* Program HW tunnel parsing and cache the mode after posting */
	if (p_tunn) {
		ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
				       p_tunn->tunn_mode);
		p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
	}

	return rc;
}
+
+enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+                                  &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
/* Set pf update ramrod command params */
/* Send a pf_update ramrod carrying a new tunnel configuration and, on
 * success, mirror the new UDP ports and parsing mode into the HW
 * registers and the cached device tunnel mode.
 */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
			    struct ecore_tunn_update_params *p_tunn,
			    enum spq_mode comp_mode,
			    struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct ecore_sp_init_data init_data;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Translate the request into the ramrod's tunnel_config; may also
	 * adjust p_tunn (e.g. clears geneve update flags on BB A0).
	 */
	ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
					&p_ent->ramrod.pf_update.tunnel_config);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	/* Program HW only after the ramrod was successfully posted */
	if ((rc == ECORE_SUCCESS) && p_tunn) {
		if (p_tunn->update_vxlan_udp_port)
			ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
						  p_tunn->vxlan_udp_port);
		if (p_tunn->update_geneve_udp_port)
			ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
						   p_tunn->geneve_udp_port);

		ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
				       p_tunn->tunn_mode);
		p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
	}

	return rc;
}
+
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
+{
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct ecore_sp_init_data init_data;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+                                  &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+                                  &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
new file mode 100644 (file)
index 0000000..e281ab0
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SP_COMMANDS_H__
+#define __ECORE_SP_COMMANDS_H__
+
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "ecore_sp_api.h"
+
+#define ECORE_SP_EQ_COMPLETION  0x01
+#define ECORE_SP_CQE_COMPLETION 0x02
+
/* Parameters required to acquire and initialize an SPQ entry. */
struct ecore_sp_init_data {
	/* The CID and FID aren't necessarily derived from hwfn,
	 * e.g., in IOV scenarios. CID might differ between SPQ and
	 * other elements.
	 */
	u32 cid;		/* connection id the ramrod acts upon */
	u16 opaque_fid;		/* function id folded into the opaque CID */

	/* Information regarding operation upon sending & completion */
	enum spq_mode comp_mode;
	struct ecore_spq_comp_cb *p_comp_data;

};
+
+/**
+ * @brief Acquire and initialize an SPQ entry for a given ramrod.
+ *
+ * @param p_hwfn
+ * @param pp_ent - will be filled with a pointer to an entry upon success
+ * @param cmd - dependent upon protocol
+ * @param protocol
+ * @param p_data - various configuration required for ramrod
+ *
+ * @return ECORE_SUCCESS upon success, otherwise failure.
+ */
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_spq_entry **pp_ent,
+                                          u8 cmd,
+                                          u8 protocol,
+                                          struct ecore_sp_init_data *p_data);
+
+/**
+ * @brief ecore_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf start tunneling configuration
+ * @param mode
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ *       for vports configured for tx-switching.
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_tunn_start_params *p_tunn,
+                                      enum ecore_mf_mode mode,
+                                      bool allow_npar_tx_switch);
+
+/**
+ * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
+ *                                     update  Ramrod
+ *
+ * This ramrod is sent to update a tunneling configuration
+ * for a physical function (PF).
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf update tunneling parameters
+ * @param comp_mode - completion mode
+ * @param p_comp_data - callback function
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+                           struct ecore_tunn_update_params *p_tunn,
+                           enum spq_mode comp_mode,
+                           struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PFs Event Ring. This ramrod also
+ * deletes the context for the Slowhwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn);
+
+#endif /*__ECORE_SP_COMMANDS_H__*/
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
new file mode 100644 (file)
index 0000000..b7ba4dd
--- /dev/null
@@ -0,0 +1,937 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_sp_api.h"
+#include "ecore_spq.h"
+#include "ecore_iro.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_int.h"
+#include "ecore_dev_api.h"
+#include "ecore_mcp.h"
+#include "ecore_hw.h"
+
+/***************************************************************************
+ * Structures & Definitions
+ ***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT   (1)
+#define SPQ_BLOCK_SLEEP_LENGTH         (1000)
+
+/***************************************************************************
+ * Blocking Imp. (BLOCK/EBLOCK mode)
+ ***************************************************************************/
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
+                                 void *cookie,
+                                 union event_ring_data *data,
+                                 u8 fw_return_code)
+{
+       struct ecore_spq_comp_done *comp_done;
+
+       comp_done = (struct ecore_spq_comp_done *)cookie;
+
+       comp_done->done = 0x1;
+       comp_done->fw_return_code = fw_return_code;
+
+       /* make update visible to waiting thread */
+       OSAL_SMP_WMB(p_hwfn->p_dev);
+}
+
+static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_spq_entry *p_ent,
+                                           u8 *p_fw_ret)
+{
+       int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+       struct ecore_spq_comp_done *comp_done;
+       enum _ecore_status_t rc;
+
+       comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
+       while (sleep_count) {
+               OSAL_POLL_MODE_DPC(p_hwfn);
+               /* validate we receive completion update */
+               OSAL_SMP_RMB(p_hwfn->p_dev);
+               if (comp_done->done == 1) {
+                       if (p_fw_ret)
+                               *p_fw_ret = comp_done->fw_return_code;
+                       return ECORE_SUCCESS;
+               }
+               OSAL_MSLEEP(5);
+               sleep_count--;
+       }
+
+       DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+       rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+       if (rc != ECORE_SUCCESS)
+               DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
+
+       /* Retry after drain */
+       sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+       while (sleep_count) {
+               /* validate we receive completion update */
+               OSAL_SMP_RMB(p_hwfn->p_dev);
+               if (comp_done->done == 1) {
+                       if (p_fw_ret)
+                               *p_fw_ret = comp_done->fw_return_code;
+                       return ECORE_SUCCESS;
+               }
+               OSAL_MSLEEP(5);
+               sleep_count--;
+       }
+
+       if (comp_done->done == 1) {
+               if (p_fw_ret)
+                       *p_fw_ret = comp_done->fw_return_code;
+               return ECORE_SUCCESS;
+       }
+
+       DP_NOTICE(p_hwfn, true,
+                 "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
+                 OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
+                 p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
+                 OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+
+       ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
+
+       return ECORE_BUSY;
+}
+
+/***************************************************************************
+ * SPQ entries inner API
+ ***************************************************************************/
+static enum _ecore_status_t
+ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
+{
+       p_ent->flags = 0;
+
+       switch (p_ent->comp_mode) {
+       case ECORE_SPQ_MODE_EBLOCK:
+       case ECORE_SPQ_MODE_BLOCK:
+               p_ent->comp_cb.function = ecore_spq_blocking_cb;
+               break;
+       case ECORE_SPQ_MODE_CB:
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+                         p_ent->comp_mode);
+               return ECORE_INVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                  "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
+                  " Data pointer: [%08x:%08x] Completion Mode: %s\n",
+                  p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
+                  p_ent->elem.hdr.protocol_id,
+                  p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
+                  D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+                          ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+                          "MODE_CB"));
+
+       return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * HSI access
+ ***************************************************************************/
+/* Program the SPQ connection context in HW: enable the relevant XSTORM
+ * aggregation flags, assign the QM physical queue, and write the SPQ and
+ * consolidation-queue chain base addresses into the xstorm context.
+ */
+static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_spq *p_spq)
+{
+       u16 pq;
+       struct ecore_cxt_info cxt_info;
+       struct core_conn_context *p_cxt;
+       union ecore_qm_pq_params pq_params;
+       enum _ecore_status_t rc;
+
+       cxt_info.iid = p_spq->cid;
+
+       rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+       /* Without the context there is nothing to program; bail out */
+       if (rc < 0) {
+               DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
+                         p_spq->cid);
+               return;
+       }
+
+       p_cxt = cxt_info.p_cxt;
+
+       SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+       SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+       /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+        *           XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
+        */
+       SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+                 XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+       /* CDU validation - FIXME currently disabled */
+
+       /* QM physical queue; SPQ traffic is serviced by the loopback TC */
+       OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+       pq_params.core.tc = LB_TC;
+       pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+       p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
+
+       /* SPQ chain base address, little-endian hi/lo split */
+       p_cxt->xstorm_st_context.spq_base_lo =
+           DMA_LO_LE(p_spq->chain.p_phys_addr);
+       p_cxt->xstorm_st_context.spq_base_hi =
+           DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+       /* Consolidation queue base address */
+       p_cxt->xstorm_st_context.consolid_base_addr.lo =
+           DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
+       p_cxt->xstorm_st_context.consolid_base_addr.hi =
+           DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+/* Produce the next element on the SPQ chain, copy the prepared ramrod
+ * element into it, and ring the XCM doorbell so FW starts processing.
+ * The barrier sequence below is order-sensitive — do not rearrange.
+ */
+static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_spq *p_spq,
+                                             struct ecore_spq_entry *p_ent)
+{
+       struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+       u16 echo = ecore_chain_get_prod_idx(p_chain);
+       struct slow_path_element *elem;
+       struct core_db_data db;
+
+       /* echo carries the producer index so the EQE completion can be
+        * matched back to this entry later.
+        */
+       p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
+       elem = ecore_chain_produce(p_chain);
+       if (!elem) {
+               DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
+               return ECORE_INVAL;
+       }
+
+       *elem = p_ent->elem;    /* struct assignment */
+
+       /* send a doorbell on the slow hwfn session */
+       OSAL_MEMSET(&db, 0, sizeof(db));
+       SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+       SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_CORE_SPQ_PROD_CMD);
+       db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+       /* validate producer is up to-date */
+       OSAL_RMB(p_hwfn->p_dev);
+
+       db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+
+       /* do not reorder: element copy must be visible before doorbell */
+       OSAL_BARRIER(p_hwfn->p_dev);
+
+       DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+
+       /* make sure doorbell is rang */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                  "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
+                  " agg_params: %02x, prod: %04x\n",
+                  DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
+                  db.agg_flags, ecore_chain_get_prod_idx(p_chain));
+
+       return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * Asynchronous events
+ ***************************************************************************/
+
+/* Dispatch an asynchronous EQ entry by protocol. Only PROTOCOLID_COMMON
+ * is recognized at this stage; anything else is reported and rejected.
+ */
+static enum _ecore_status_t
+ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
+                            struct event_ring_entry *p_eqe)
+{
+       if (p_eqe->protocol_id == PROTOCOLID_COMMON)
+               return ECORE_SUCCESS;
+
+       DP_NOTICE(p_hwfn,
+                 true, "Unknown Async completion for protocol: %d\n",
+                 p_eqe->protocol_id);
+       return ECORE_INVAL;
+}
+
+/***************************************************************************
+ * EQ API
+ ***************************************************************************/
+/* Mirror the driver's EQ chain producer into this PF's USTORM EQE
+ * consumer slot (presumably so FW knows which entries may be reused —
+ * confirm against HSI).
+ */
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
+{
+       u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+           USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+       REG_WR16(p_hwfn, addr, prod);
+
+       /* keep prod updates ordered */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+}
+
+/* SP status-block callback: consume EQ entries up to a snapshot of the
+ * FW consumer, dispatching each entry either to the async handler or to
+ * the SPQ completion flow, then publish the new producer to HW.
+ *
+ * @param cookie - the struct ecore_eq registered via ecore_int_register_cb
+ * @return ECORE_SUCCESS, or ECORE_INVAL if any entry failed to dispatch
+ */
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+                                        void *cookie)
+{
+       struct ecore_eq *p_eq = cookie;
+       struct ecore_chain *p_chain = &p_eq->chain;
+       enum _ecore_status_t rc = 0;
+
+       /* take a snapshot of the FW consumer */
+       u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+       /* Need to guarantee the fw_cons index we use points to a usable
+        * element (to comply with our chain), so our macros would comply
+        */
+       if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
+           ecore_chain_get_usable_per_page(p_chain)) {
+               fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
+       }
+
+       /* Complete current segment of eq entries */
+       while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
+               struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
+               if (!p_eqe) {
+                       rc = ECORE_INVAL;
+                       break;
+               }
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                               "op %x prot %x res0 %x echo %x "
+                               "fwret %x flags %x\n", p_eqe->opcode,
+                          p_eqe->protocol_id,  /* Event Protocol ID */
+                          p_eqe->reserved0,    /* Reserved */
+                          OSAL_LE16_TO_CPU(p_eqe->echo),
+                          p_eqe->fw_return_code,       /* FW return code for SP
+                                                        * ramrods
+                                                        */
+                          p_eqe->flags);
+
+               /* Async events have no matching SPQ entry; everything else
+                * completes a previously-posted ramrod via its echo.
+                */
+               if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+                       if (ecore_async_event_completion(p_hwfn, p_eqe))
+                               rc = ECORE_INVAL;
+               } else if (ecore_spq_completion(p_hwfn,
+                                               p_eqe->echo,
+                                               p_eqe->fw_return_code,
+                                               &p_eqe->data)) {
+                       rc = ECORE_INVAL;
+               }
+
+               ecore_chain_recycle_consumed(p_chain);
+       }
+
+       /* Let FW know which entries have been processed */
+       ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
+
+       return rc;
+}
+
+/* Allocate the event queue: the ecore_eq struct, a PBL-mode chain of
+ * num_elem entries, and register ecore_eq_completion on the SP SB.
+ *
+ * @return the new EQ, or OSAL_NULL on allocation failure
+ */
+struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+{
+       struct ecore_eq *p_eq;
+
+       /* Allocate EQ struct */
+       p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
+       if (!p_eq) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_eq'\n");
+               return OSAL_NULL;
+       }
+
+       /* Allocate and initialize EQ chain */
+       if (ecore_chain_alloc(p_hwfn->p_dev,
+                             ECORE_CHAIN_USE_TO_PRODUCE,
+                             ECORE_CHAIN_MODE_PBL,
+                             ECORE_CHAIN_CNT_TYPE_U16,
+                             num_elem,
+                             sizeof(union event_ring_element), &p_eq->chain)) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
+               goto eq_allocate_fail;
+       }
+
+       /* register EQ completion on the SP SB */
+       ecore_int_register_cb(p_hwfn,
+                             ecore_eq_completion,
+                             p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
+
+       return p_eq;
+
+eq_allocate_fail:
+       ecore_eq_free(p_hwfn, p_eq);
+       return OSAL_NULL;
+}
+
+/* Reset the EQ chain indices to their initial state */
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+{
+       ecore_chain_reset(&p_eq->chain);
+}
+
+/* Free the EQ chain and the ecore_eq struct; safe to call with NULL */
+void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+{
+       if (!p_eq)
+               return;
+       ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
+       OSAL_FREE(p_hwfn->p_dev, p_eq);
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+/* Complete a ramrod from a slow-path RX CQE using its echo value.
+ * NOTE: 'protocol' is currently unused — kept for future per-protocol
+ * command handling (see comment below).
+ */
+static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
+                                                struct eth_slow_path_rx_cqe
+                                                *cqe,
+                                                enum protocol_type protocol)
+{
+       /* @@@tmp - it's possible we'll eventually want to handle some
+        * actual commands that can arrive here, but for now this is only
+        * used to complete the ramrod using the echo value on the cqe
+        */
+       return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
+}
+
+/* Handle an ETH slow-path RX CQE by completing the matching ramrod;
+ * logs a notice if no matching SPQ entry could be completed.
+ */
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+                                             struct eth_slow_path_rx_cqe *cqe)
+{
+       enum _ecore_status_t rc = ecore_cqe_completion(p_hwfn, cqe,
+                                                      PROTOCOLID_ETH);
+
+       if (rc != ECORE_SUCCESS)
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+                         cqe->ramrod_cmd_id);
+
+       return rc;
+}
+
+/***************************************************************************
+ * Slow hwfn Queue (spq)
+ ***************************************************************************/
+/* (Re)initialize an already-allocated SPQ: reset all lists and counters,
+ * pre-link each entry's ramrod buffer address into the free pool, acquire
+ * the SPQ CID and program the HW connection context.
+ */
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq_entry *p_virt = OSAL_NULL;
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       dma_addr_t p_phys = 0;
+       u32 i, capacity;
+
+       OSAL_LIST_INIT(&p_spq->pending);
+       OSAL_LIST_INIT(&p_spq->completion_pending);
+       OSAL_LIST_INIT(&p_spq->free_pool);
+       OSAL_LIST_INIT(&p_spq->unlimited_pending);
+       OSAL_SPIN_LOCK_INIT(&p_spq->lock);
+
+       /* SPQ empty pool; each element points at its own embedded ramrod
+        * buffer (physical address of the 'ramrod' member).
+        */
+       p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
+       p_virt = p_spq->p_virt;
+
+       capacity = ecore_chain_get_capacity(&p_spq->chain);
+       for (i = 0; i < capacity; i++) {
+               p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
+               p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+
+               OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
+
+               p_virt++;
+               p_phys += sizeof(struct ecore_spq_entry);
+       }
+
+       /* Statistics */
+       p_spq->normal_count = 0;
+       p_spq->comp_count = 0;
+       p_spq->comp_sent_count = 0;
+       p_spq->unlimited_pending_count = 0;
+
+       OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
+                     SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
+       p_spq->comp_bitmap_idx = 0;
+
+       /* SPQ cid, cannot fail */
+       ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+       ecore_spq_hw_initialize(p_hwfn, p_spq);
+
+       /* reset the chain itself */
+       ecore_chain_reset(&p_spq->chain);
+}
+
+/* Allocate the SPQ: the ecore_spq struct, a single-page SPQ ring, and a
+ * DMA-coherent array of SPQ entries (one per ring element, each holding
+ * its ramrod data). On success p_hwfn->p_spq is set.
+ *
+ * @return ECORE_SUCCESS or ECORE_NOMEM
+ */
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq_entry *p_virt = OSAL_NULL;
+       struct ecore_spq *p_spq = OSAL_NULL;
+       dma_addr_t p_phys = 0;
+       u32 capacity;
+
+       /* SPQ struct */
+       p_spq =
+           OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
+       if (!p_spq) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_spq'");
+               return ECORE_NOMEM;
+       }
+
+       /* SPQ ring  */
+       if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
+                       ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
+                       /* N/A when the mode is SINGLE */
+                       sizeof(struct slow_path_element), &p_spq->chain)) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
+               goto spq_allocate_fail;
+       }
+
+       /* allocate and fill the SPQ elements (incl. ramrod data list) */
+       capacity = ecore_chain_get_capacity(&p_spq->chain);
+       p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
+                                        capacity *
+                                        sizeof(struct ecore_spq_entry));
+       if (!p_virt)
+               goto spq_allocate_fail;
+
+       p_spq->p_virt = p_virt;
+       p_spq->p_phys = p_phys;
+
+       OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+
+       p_hwfn->p_spq = p_spq;
+       return ECORE_SUCCESS;
+
+spq_allocate_fail:
+       /* ecore_chain_free() is safe even if the chain was never allocated */
+       ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+       OSAL_FREE(p_hwfn->p_dev, p_spq);
+       return ECORE_NOMEM;
+}
+
+/* Release all SPQ resources (entry array, ring, lock, struct);
+ * safe to call when no SPQ was allocated.
+ */
+void ecore_spq_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       u32 capacity;
+
+       if (!p_spq)
+               return;
+
+       if (p_spq->p_virt) {
+               capacity = ecore_chain_get_capacity(&p_spq->chain);
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_spq->p_virt,
+                                      p_spq->p_phys,
+                                      capacity *
+                                      sizeof(struct ecore_spq_entry));
+       }
+
+       ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+       OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+       OSAL_FREE(p_hwfn->p_dev, p_spq);
+}
+
+/* Obtain an SPQ entry: from the free pool when available, otherwise a
+ * freshly-allocated entry that will be routed through unlimited_pending.
+ * The entry's 'queue' field records which path it came from.
+ *
+ * @return ECORE_SUCCESS or ECORE_NOMEM
+ */
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
+{
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+
+       OSAL_SPIN_LOCK(&p_spq->lock);
+
+       if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+               /* GFP_ATOMIC: may be called from a non-sleepable context */
+               p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+                                   sizeof(struct ecore_spq_entry));
+               if (!p_ent) {
+                       OSAL_SPIN_UNLOCK(&p_spq->lock);
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to allocate an SPQ entry"
+                                 " for a pending ramrod\n");
+                       return ECORE_NOMEM;
+               }
+               p_ent->queue = &p_spq->unlimited_pending;
+       } else {
+               p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+                                             struct ecore_spq_entry, list);
+               OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
+               p_ent->queue = &p_spq->pending;
+       }
+
+       *pp_ent = p_ent;
+
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       return ECORE_SUCCESS;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_spq_entry *p_ent)
+{
+       OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+/* Return an SPQ entry to the free pool, taking the SPQ lock */
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+                           struct ecore_spq_entry *p_ent)
+{
+       OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
+       __ecore_spq_return_entry(p_hwfn, p_ent);
+       OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief ecore_spq_add_entry - adds a new entry to the pending
+ *        list. Should be used while the SPQ lock is being held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t
+ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
+                   struct ecore_spq_entry *p_ent, enum spq_priority priority)
+{
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+
+       if (p_ent->queue == &p_spq->unlimited_pending) {
+               struct ecore_spq_entry *p_en2;
+
+               /* No free element to swap into - keep the entry queued on
+                * the unlimited_pending list for a later attempt.
+                */
+               if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+                       OSAL_LIST_PUSH_TAIL(&p_ent->list,
+                                           &p_spq->unlimited_pending);
+                       p_spq->unlimited_pending_count++;
+
+                       return ECORE_SUCCESS;
+               }
+
+               p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+                                             struct ecore_spq_entry,
+                                             list);
+               OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
+
+               /* Copy the ring element physical pointer to the new
+                * entry, since we are about to override the entire ring
+                * entry and don't want to lose the pointer.
+                */
+               p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+
+               /* Setting the cookie to the comp_done of the
+                * new element.
+                */
+               if (p_ent->comp_cb.cookie == &p_ent->comp_done)
+                       p_ent->comp_cb.cookie = &p_en2->comp_done;
+
+               *p_en2 = *p_ent;
+
+               OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+               p_ent = p_en2;
+       }
+
+       /* entry is to be placed in 'pending' queue */
+       switch (priority) {
+       case ECORE_SPQ_PRIORITY_NORMAL:
+               OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
+               p_spq->normal_count++;
+               break;
+       case ECORE_SPQ_PRIORITY_HIGH:
+               OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
+               p_spq->high_count++;
+               break;
+       default:
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * Accessor
+ ***************************************************************************/
+
+/* Return the SPQ connection id, or 0xffffffff (illegal) when no SPQ
+ * has been allocated for this hwfn.
+ */
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
+{
+       return p_hwfn->p_spq ? p_hwfn->p_spq->cid : 0xffffffff;
+}
+
+/***************************************************************************
+ * Posting new Ramrods
+ ***************************************************************************/
+
+/* Post entries from 'head' onto the SPQ ring while more than
+ * 'keep_reserve' ring elements remain free. Posted entries move to the
+ * completion_pending list until their completion EQE arrives; on a HW
+ * post failure the entry goes back to the free pool.
+ */
+static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
+                                               osal_list_t *head,
+                                               u32 keep_reserve)
+{
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       enum _ecore_status_t rc;
+
+       /* TODO - implementation might be wasteful; will always keep room
+        * for an additional high priority ramrod (even if one is already
+        * pending FW)
+        */
+       while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+              !OSAL_LIST_IS_EMPTY(head)) {
+               struct ecore_spq_entry *p_ent =
+                   OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
+               OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
+               OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
+               p_spq->comp_sent_count++;
+
+               rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
+               if (rc) {
+                       OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+                                              &p_spq->completion_pending);
+                       __ecore_spq_return_entry(p_hwfn, p_ent);
+                       return rc;
+               }
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Drain unlimited_pending onto the regular pending list as long as the
+ * free pool can supply ring-backed entries, then post the pending list
+ * to HW (reserving room for high-priority ramrods). Must be called with
+ * the SPQ lock held.
+ *
+ * @return ECORE_SUCCESS, ECORE_INVAL, or the HW-post error code
+ */
+static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+
+       while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+               if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
+                       break;
+
+               p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
+                                             struct ecore_spq_entry, list);
+               if (!p_ent)
+                       return ECORE_INVAL;
+
+               OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
+
+               ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+       }
+
+       return ecore_spq_post_list(p_hwfn, &p_spq->pending,
+                                  SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+/* Post a prepared SPQ entry: fill its completion fields, queue it and
+ * attempt to post all pending entries to HW. In EBLOCK mode also blocks
+ * until the ramrod completes and returns the FW return code via
+ * fw_return_code. During recovery the post is skipped and success is
+ * returned so caller flows complete cleanly.
+ */
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_spq_entry *p_ent,
+                                   u8 *fw_return_code)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
+       bool b_ret_ent = true;
+
+       if (!p_hwfn)
+               return ECORE_INVAL;
+
+       if (!p_ent) {
+               DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
+               return ECORE_INVAL;
+       }
+
+       if (p_hwfn->p_dev->recov_in_prog) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                          "Recovery is in progress -> skip spq post"
+                          " [cmd %02x protocol %02x]",
+                          p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+               /* Return success to let the flows to be completed successfully
+                * w/o any error handling.
+                */
+               return ECORE_SUCCESS;
+       }
+
+       OSAL_SPIN_LOCK(&p_spq->lock);
+
+       /* Complete the entry */
+       rc = ecore_spq_fill_entry(p_hwfn, p_ent);
+
+       /* Check return value after LOCK is taken for cleaner error flow */
+       if (rc)
+               goto spq_post_fail;
+
+       /* Add the request to the pending queue */
+       rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+       if (rc)
+               goto spq_post_fail;
+
+       rc = ecore_spq_pend_post(p_hwfn);
+       if (rc) {
+               /* Since it's possible that pending failed for a different
+                * entry [although unlikely], the failed entry was already
+                * dealt with; No need to return it here.
+                */
+               b_ret_ent = false;
+               goto spq_post_fail;
+       }
+
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
+               /* For entries in ECORE BLOCK mode, the completion code cannot
+                * perform the necessary cleanup - if it did, we couldn't
+                * access p_ent here to see whether it's successful or not.
+                * Thus, after gaining the answer perform the cleanup here.
+                */
+               rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
+               if (rc)
+                       goto spq_post_fail2;
+
+               /* return to pool */
+               ecore_spq_return_entry(p_hwfn, p_ent);
+       }
+       return rc;
+
+spq_post_fail2:
+       /* Blocking wait failed: pull the entry off completion_pending and
+        * give its ring element back before returning it to the free pool.
+        */
+       OSAL_SPIN_LOCK(&p_spq->lock);
+       OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
+       ecore_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+       /* return to the free pool */
+       if (b_ret_ent)
+               __ecore_spq_return_entry(p_hwfn, p_ent);
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       return rc;
+}
+
+/* Complete a previously-posted SPQ entry matched by its 'echo' value:
+ * remove it from completion_pending, advance the ring consumer (handling
+ * out-of-order completions via the completion bitmap), invoke the
+ * entry's callback, return non-EBLOCK entries to the free pool and try
+ * to post any further pending requests.
+ *
+ * @return ECORE_SUCCESS, ECORE_EXISTS if no entry matched the echo, or
+ *         the result of the follow-up pend-post attempt
+ */
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+                                         __le16 echo,
+                                         u8 fw_return_code,
+                                         union event_ring_data *p_data)
+{
+       struct ecore_spq *p_spq;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct ecore_spq_entry *tmp;
+       struct ecore_spq_entry *found = OSAL_NULL;
+       enum _ecore_status_t rc;
+
+       if (!p_hwfn)
+               return ECORE_INVAL;
+
+       p_spq = p_hwfn->p_spq;
+       if (!p_spq)
+               return ECORE_INVAL;
+
+       OSAL_SPIN_LOCK(&p_spq->lock);
+       OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
+                                     tmp,
+                                     &p_spq->completion_pending,
+                                     list, struct ecore_spq_entry) {
+               if (p_ent->elem.hdr.echo == echo) {
+                       OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+                                              &p_spq->completion_pending);
+
+                       /* Avoid overriding of SPQ entries when getting
+                        * out-of-order completions, by marking the completions
+                        * in a bitmap and increasing the chain consumer only
+                        * for the first successive completed entries.
+                        */
+                       SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
+                       while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
+                                                     p_spq->comp_bitmap_idx)) {
+                               SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
+                                                       p_spq->comp_bitmap_idx);
+                               p_spq->comp_bitmap_idx++;
+                               ecore_chain_return_produced(&p_spq->chain);
+                       }
+
+                       p_spq->comp_count++;
+                       found = p_ent;
+                       break;
+               }
+
+               /* This is debug and should be relatively uncommon - depends
+                * on scenarios which have multiple per-PF sent ramrods.
+                */
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                          "Got completion for echo %04x - doesn't match"
+                          " echo %04x in completion pending list\n",
+                          OSAL_LE16_TO_CPU(echo),
+                          OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+       }
+
+       /* Release lock before callback, as callback may post
+        * an additional ramrod.
+        */
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       if (!found) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to find an entry this"
+                         " EQE [echo %04x] completes\n",
+                         OSAL_LE16_TO_CPU(echo));
+               return ECORE_EXISTS;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                  "Complete EQE [echo %04x]: func %p cookie %p)\n",
+                  OSAL_LE16_TO_CPU(echo),
+                  p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+       if (found->comp_cb.function)
+               found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+                                       fw_return_code);
+
+       if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
+               /* EBLOCK is responsible for freeing its own entry */
+               ecore_spq_return_entry(p_hwfn, found);
+       }
+
+       /* Attempt to post pending requests */
+       OSAL_SPIN_LOCK(&p_spq->lock);
+       rc = ecore_spq_pend_post(p_hwfn);
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       return rc;
+}
+
+/* Allocate the consolidation queue (ConsQ): the struct plus a PBL-mode
+ * chain filling one chain page.
+ *
+ * @return the new ConsQ, or OSAL_NULL on allocation failure
+ */
+struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_consq *p_consq;
+
+       /* Allocate ConsQ struct */
+       p_consq =
+           OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
+       if (!p_consq) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_consq'\n");
+               return OSAL_NULL;
+       }
+
+       /* Allocate and initialize ConsQ chain; 0x80 is the element size
+        * in bytes (presumably the HSI consolidation-queue element size -
+        * TODO replace the magic number with a named constant).
+        */
+       if (ecore_chain_alloc(p_hwfn->p_dev,
+                             ECORE_CHAIN_USE_TO_PRODUCE,
+                             ECORE_CHAIN_MODE_PBL,
+                             ECORE_CHAIN_CNT_TYPE_U16,
+                             ECORE_CHAIN_PAGE_SIZE / 0x80,
+                             0x80, &p_consq->chain)) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
+               goto consq_allocate_fail;
+       }
+
+       return p_consq;
+
+consq_allocate_fail:
+       ecore_consq_free(p_hwfn, p_consq);
+       return OSAL_NULL;
+}
+
+/* Reset the ConsQ chain indices to their initial state */
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+{
+       ecore_chain_reset(&p_consq->chain);
+}
+
+/* Free the ConsQ chain and struct; safe to call with NULL */
+void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+{
+       if (!p_consq)
+               return;
+       ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
+       OSAL_FREE(p_hwfn->p_dev, p_consq);
+}
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
new file mode 100644 (file)
index 0000000..5c16865
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SPQ_H__
+#define __ECORE_SPQ_H__
+
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_sp_api.h"
+
+union ramrod_data {
+       struct pf_start_ramrod_data pf_start;
+       struct pf_update_ramrod_data pf_update;
+       struct rx_queue_start_ramrod_data rx_queue_start;
+       struct rx_queue_update_ramrod_data rx_queue_update;
+       struct rx_queue_stop_ramrod_data rx_queue_stop;
+       struct tx_queue_start_ramrod_data tx_queue_start;
+       struct tx_queue_stop_ramrod_data tx_queue_stop;
+       struct vport_start_ramrod_data vport_start;
+       struct vport_stop_ramrod_data vport_stop;
+       struct vport_update_ramrod_data vport_update;
+       struct core_rx_start_ramrod_data core_rx_queue_start;
+       struct core_rx_stop_ramrod_data core_rx_queue_stop;
+       struct core_tx_start_ramrod_data core_tx_queue_start;
+       struct core_tx_stop_ramrod_data core_tx_queue_stop;
+       struct vport_filter_update_ramrod_data vport_filter_update;
+
+       struct vf_start_ramrod_data vf_start;
+       struct vf_stop_ramrod_data vf_stop;
+};
+
+#define EQ_MAX_CREDIT  0xffffffff
+
+enum spq_priority {
+       ECORE_SPQ_PRIORITY_NORMAL,
+       ECORE_SPQ_PRIORITY_HIGH,
+};
+
+union ecore_spq_req_comp {
+       struct ecore_spq_comp_cb cb;
+       u64 *done_addr;
+};
+
+/* SPQ_MODE_EBLOCK */
+struct ecore_spq_comp_done {
+       u64 done;
+       u8 fw_return_code;
+};
+
+struct ecore_spq_entry {
+       osal_list_entry_t list;
+
+       u8 flags;
+
+       /* HSI slow path element */
+       struct slow_path_element elem;
+
+       union ramrod_data ramrod;
+
+       enum spq_priority priority;
+
+       /* pending queue for this entry */
+       osal_list_t *queue;
+
+       enum spq_mode comp_mode;
+       struct ecore_spq_comp_cb comp_cb;
+       struct ecore_spq_comp_done comp_done;   /* SPQ_MODE_EBLOCK */
+};
+
+struct ecore_eq {
+       struct ecore_chain chain;
+       u8 eq_sb_index;         /* index within the SB */
+       __le16 *p_fw_cons;      /* ptr to index value */
+};
+
+struct ecore_consq {
+       struct ecore_chain chain;
+};
+
+struct ecore_spq {
+       osal_spinlock_t lock;
+
+       osal_list_t unlimited_pending;
+       osal_list_t pending;
+       osal_list_t completion_pending;
+       osal_list_t free_pool;
+
+       struct ecore_chain chain;
+
+       /* allocated dma-able memory for spq entries (+ramrod data) */
+       dma_addr_t p_phys;
+       struct ecore_spq_entry *p_virt;
+
+       /* Bitmap for handling out-of-order completions */
+#define SPQ_RING_SIZE                                          \
+       (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+#define SPQ_COMP_BMAP_SIZE                                     \
+(SPQ_RING_SIZE / (sizeof(unsigned long) * 8 /* BITS_PER_LONG */))
+       unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
+       u8 comp_bitmap_idx;
+#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx)                      \
+(OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx)                    \
+(OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx)                     \
+(OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+       /* Statistics */
+       u32 unlimited_pending_count;
+       u32 normal_count;
+       u32 high_count;
+       u32 comp_sent_count;
+       u32 comp_count;
+
+       u32 cid;
+};
+
+struct ecore_port;
+struct ecore_hwfn;
+
+/**
+ * @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that
+ *        Pends it to the future list.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_spq_entry *p_ent,
+                                   u8 *fw_return_code);
+
+/**
+ * @brief ecore_spq_alloc - Allocates & initializes the SPQ and EQ.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_free - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_get_entry - Obtain an entry from the spq
+ *        free pool list.
+ *
+ *
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent);
+
+/**
+ * @brief ecore_spq_return_entry - Return an entry to spq free
+ *                                 pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+                           struct ecore_spq_entry *p_ent);
+/**
+ * @brief ecore_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
+ */
+struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
+
+/**
+ * @brief ecore_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+
+/**
+ * @brief ecore_eq_free - deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+
+/**
+ * @brief ecore_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod);
+
+/**
+ * @brief ecore_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+                                        void *cookie);
+
+/**
+ * @brief ecore_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+                                         __le16 echo,
+                                         u8 fw_return_code,
+                                         union event_ring_data *p_data);
+
+/**
+ * @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_alloc - Allocates & initializes an ConsQ
+ *        struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct ecore_consq* - a newly allocated structure; NULL upon error.
+ */
+struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_setup - Reset the ConsQ to its start
+ *        state.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+
+/**
+ * @brief ecore_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+
+#endif /* __ECORE_SPQ_H__ */
diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h
new file mode 100644 (file)
index 0000000..98d40bb
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_STATUS_H__
+#define __ECORE_STATUS_H__
+
+enum _ecore_status_t {
+       ECORE_UNKNOWN_ERROR = -12,
+       ECORE_NORESOURCES = -11,
+       ECORE_NODEV = -10,
+       ECORE_ABORTED = -9,
+       ECORE_AGAIN = -8,
+       ECORE_NOTIMPL = -7,
+       ECORE_EXISTS = -6,
+       ECORE_IO = -5,
+       ECORE_TIMEOUT = -4,
+       ECORE_INVAL = -3,
+       ECORE_BUSY = -2,
+       ECORE_NOMEM = -1,
+       ECORE_SUCCESS = 0,
+       /* PENDING is not an error and should be positive */
+       ECORE_PENDING = 1,
+};
+
+#endif /* __ECORE_STATUS_H__ */
diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h
new file mode 100644 (file)
index 0000000..616b44c
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_UTILS_H__
+#define __ECORE_UTILS_H__
+
+/* dma_addr_t manip */
+#define DMA_LO(x)              ((u32)(((dma_addr_t)(x)) & 0xffffffff))
+#define DMA_HI(x)              ((u32)(((dma_addr_t)(x)) >> 32))
+
+#define DMA_LO_LE(x)           OSAL_CPU_TO_LE32(DMA_LO(x))
+#define DMA_HI_LE(x)           OSAL_CPU_TO_LE32(DMA_HI(x))
+
+/* It's assumed that whoever includes this has previously included an hsi
+ * file defining the regpair.
+ */
+#define DMA_REGPAIR_LE(x, val) (x).hi = DMA_HI_LE((val)); \
+                               (x).lo = DMA_LO_LE((val))
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo)       HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo)                HILO_GEN(hi, lo, u64)
+#define HILO_DMA_REGPAIR(regpair)      (HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair)       (HILO_64(regpair.hi, regpair.lo))
+
+#endif
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
new file mode 100644 (file)
index 0000000..046bbb2
--- /dev/null
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+#define ETH_CACHE_LINE_SIZE                 64
+#define ETH_RX_CQE_GAP                                         32
+#define ETH_MAX_RAMROD_PER_CON                         8
+#define ETH_TX_BD_PAGE_SIZE_BYTES                      4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES                      4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES                     4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS                       2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                         1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                      18
+#define ETH_TX_MAX_LSO_HDR_NBD                                         4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT                                     3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT      3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT           2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE         2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN                  (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES                    510
+#define ETH_TX_LSO_WINDOW_BDS_NUM                   18
+#define ETH_TX_LSO_WINDOW_MIN_LEN                   9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN                  0xFFFF
+
+#define ETH_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
+
+#define ETH_RX_MAX_BUFF_PER_PKT             5
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS                                    512
+#define ETH_NUM_VLAN_FILTERS                           512
+
+/* approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED            0
+#define ETH_MULTICAST_MAC_BINS                         256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS         (ETH_MULTICAST_MAC_BINS / 32)
+
+/*  ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT                         10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM          128
+#define ETH_RSS_KEY_SIZE_REGS                      10
+#define ETH_RSS_ENGINE_NUM_K2               207
+#define ETH_RSS_ENGINE_NUM_BB               127
+
+/* TPA constants */
+#define ETH_TPA_MAX_AGGS_NUM              64
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE   ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE    6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE     4
+
+/*
+ * Interrupt coalescing TimeSet
+ */
+struct coalescing_timeset {
+       u8 timeset;
+       u8 valid /* Only if this flag is set, timeset will take effect */;
+};
+
+/*
+ * Destination port mode
+ */
+enum dest_port_mode {
+       DEST_PORT_PHY /* Send to physical port. */,
+       DEST_PORT_LOOPBACK /* Send to loopback port. */,
+       DEST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */,
+       DEST_PORT_DROP /* Drop the packet in PBF. */,
+       MAX_DEST_PORT_MODE
+};
+
+/*
+ * Ethernet address type
+ */
+enum eth_addr_type {
+       BROADCAST_ADDRESS,
+       MULTICAST_ADDRESS,
+       UNICAST_ADDRESS,
+       UNKNOWN_ADDRESS,
+       MAX_ETH_ADDR_TYPE
+};
+
+struct eth_tx_1st_bd_flags {
+       u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK         0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT        0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT         2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT         3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK   0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT  4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK              0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT             5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT    6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT    7
+};
+
+/*
+ * The parsing information data for the first tx bd of a given packet.
+ */
+struct eth_tx_data_1st_bd {
+       __le16 vlan /* VLAN to insert to packet (if needed). */;
+               /* Number of BDs in packet. Should be at least 2 in non-LSO
+               * packet and at least 3 in LSO (or Tunnel with IPv6+ext) packet.
+               */
+       u8 nbds;
+       struct eth_tx_1st_bd_flags bd_flags;
+       __le16 bitfields;
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK  0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK          0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT         1
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK        0x3FFF
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT       2
+};
+
+/*
+ * The parsing information data for the second tx bd of a given packet.
+ */
+struct eth_tx_data_2nd_bd {
+       __le16 tunn_ip_size;
+       __le16 bitfields1;
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK  0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK       0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT      4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK            0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT           6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK                  0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT                 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK                 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT                9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK           0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT          11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                  0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK             0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT            13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK                    0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT                   14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK       0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT      15
+       __le16 bitfields2;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK     0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT    0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK                 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                13
+};
+
+/*
+ * Firmware data for L2-EDPM packet.
+ */
+struct eth_edpm_fw_data {
+       struct eth_tx_data_1st_bd data_1st_bd
+           /* Parsing information data from the 1st BD. */;
+       struct eth_tx_data_2nd_bd data_2nd_bd
+           /* Parsing information data from the 2nd BD. */;
+       __le32 reserved;
+};
+
+/*
+ * FW debug.
+ */
+struct eth_fast_path_cqe_fw_debug {
+       u8 reserved0 /* FW reserved. */;
+       u8 reserved1 /* FW reserved. */;
+       __le16 reserved2 /* FW reserved. */;
+};
+
+struct tunnel_parsing_flags {
+       u8 flags;
+#define TUNNEL_PARSING_FLAGS_TYPE_MASK              0x3
+#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT             0
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK  0x1
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK     0x3
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT    3
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK   0x1
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT  5
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK     0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT    6
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK      0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT     7
+};
+
+/*
+ * Regular ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_reg_cqe {
+       u8 type /* CQE type */;
+       u8 bitfields;
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK  0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK             0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT            3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK      0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT     7
+       __le16 pkt_len /* Total packet length (from the parser) */;
+       struct parsing_and_err_flags pars_flags
+           /* Parsing and error flags from the parser */;
+       __le16 vlan_tag /* 802.1q VLAN tag */;
+       __le32 rss_hash /* RSS hash result */;
+       __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+       u8 placement_offset /* Offset of placement from BD start */;
+       struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */
+         ;
+       u8 bd_num /* Number of BDs, used for packet */;
+       u8 reserved[7];
+       struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+       u8 reserved1[3];
+       u8 flags;
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK          0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT         0
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK   0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT  1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK      0x3F
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT     2
+};
+
+/*
+ * TPA-continue ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_cont_cqe {
+       u8 type /* CQE type */;
+       u8 tpa_agg_index /* TPA aggregation index */;
+       __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]
+           /* List of the segment sizes */;
+       u8 reserved[5];
+       u8 reserved1 /* FW reserved. */;
+       __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */;
+};
+
+/*
+ * TPA-end ETH Rx FP CQE .
+ */
+struct eth_fast_path_rx_tpa_end_cqe {
+       u8 type /* CQE type */;
+       u8 tpa_agg_index /* TPA aggregation index */;
+       __le16 total_packet_len /* Total aggregated packet length */;
+       u8 num_of_bds /* Total number of BDs comprising the packet */;
+       u8 end_reason /* Aggregation end reason. Use enum eth_tpa_end_reason */
+         ;
+       __le16 num_of_coalesced_segs /* Number of coalesced TCP segments */;
+       __le32 ts_delta /* TCP timestamp delta */;
+       __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]
+           /* List of the segment sizes */;
+       u8 reserved1[3];
+       u8 reserved2 /* FW reserved. */;
+       __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */;
+};
+
+/*
+ * TPA-start ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_start_cqe {
+       u8 type /* CQE type */;
+       u8 bitfields;
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK  0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK             0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT            3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK      0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT     7
+       __le16 seg_len /* Segment length (packetLen from the parser) */;
+       struct parsing_and_err_flags pars_flags
+           /* Parsing and error flags from the parser */;
+       __le16 vlan_tag /* 802.1q VLAN tag */;
+       __le32 rss_hash /* RSS hash result */;
+       __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+       u8 placement_offset /* Offset of placement from BD start */;
+       struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */
+         ;
+       u8 tpa_agg_index /* TPA aggregation index */;
+       u8 header_len /* Packet L2+L3+L4 header length */;
+       __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]
+           /* Additional BDs length list. */;
+       struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+};
+
+/*
+ * The L4 pseudo checksum mode for Ethernet
+ */
+enum eth_l4_pseudo_checksum_mode {
+       ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH
+               /* Pseudo Header checksum on packet is calculated
+                * with the correct packet length field.
+               */
+          ,
+       ETH_L4_PSEUDO_CSUM_ZERO_LENGTH
+           /* Pseudo Hdr checksum on packet is calc with zero len field. */
+          ,
+       MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct eth_rx_bd {
+       struct regpair addr /* single continuous buffer */;
+};
+
+/*
+ * regular ETH Rx SP CQE
+ */
+struct eth_slow_path_rx_cqe {
+       u8 type /* CQE type */;
+       u8 ramrod_cmd_id;
+       u8 error_flag;
+       u8 reserved[25];
+       __le16 echo;
+       u8 reserved1;
+       u8 flags;
+#define ETH_SLOW_PATH_RX_CQE_VALID_MASK         0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT        0
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK  0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK     0x3F
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT    2
+};
+
+/*
+ * union for all ETH Rx CQE types
+ */
+union eth_rx_cqe {
+       struct eth_fast_path_rx_reg_cqe fast_path_regular /* Regular FP CQE */;
+       struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start
+           /* TPA-start CQE */;
+       struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont
+           /* TPA-continue CQE */;
+       struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end /* TPA-end CQE */
+         ;
+       struct eth_slow_path_rx_cqe slow_path /* SP CQE */;
+};
+
+/*
+ * ETH Rx CQE type
+ */
+enum eth_rx_cqe_type {
+       ETH_RX_CQE_TYPE_UNUSED,
+       ETH_RX_CQE_TYPE_REGULAR /* Regular FP ETH Rx CQE */,
+       ETH_RX_CQE_TYPE_SLOW_PATH /* Slow path ETH Rx CQE */,
+       ETH_RX_CQE_TYPE_TPA_START /* TPA start ETH Rx CQE */,
+       ETH_RX_CQE_TYPE_TPA_CONT /* TPA Continue ETH Rx CQE */,
+       ETH_RX_CQE_TYPE_TPA_END /* TPA end ETH Rx CQE */,
+       MAX_ETH_RX_CQE_TYPE
+};
+
+/*
+ * Wrapper for PMD RX CQE used in order to cover full cache line when writing CQE
+ */
+struct eth_rx_pmd_cqe {
+       union eth_rx_cqe cqe /* CQE data itself */;
+       u8 reserved[ETH_RX_CQE_GAP];
+};
+
+/*
+ * ETH Rx producers data
+ */
+struct eth_rx_prod_data {
+       __le16 bd_prod /* BD producer */;
+       __le16 cqe_prod /* CQE producer */;
+       __le16 reserved;
+       __le16 reserved1 /* FW reserved. */;
+};
+
+/*
+ * Aggregation end reason.
+ */
+enum eth_tpa_end_reason {
+       ETH_AGG_END_UNUSED,
+       ETH_AGG_END_SP_UPDATE /* SP configuration update */,
+       ETH_AGG_END_MAX_LEN
+           /* Maximum aggregation length or maximum buffer number used. */,
+       ETH_AGG_END_LAST_SEG
+           /* TCP PSH flag or TCP payload length below continue threshold. */,
+       ETH_AGG_END_TIMEOUT /* Timeout expiration. */,
+       ETH_AGG_END_NOT_CONSISTENT,
+       ETH_AGG_END_OUT_OF_ORDER,
+       ETH_AGG_END_NON_TPA_SEG,
+       MAX_ETH_TPA_END_REASON
+};
+
+/*
+ * Eth Tunnel Type
+ */
+enum eth_tunn_type {
+       ETH_TUNN_GENEVE /* GENEVE Tunnel. */,
+       ETH_TUNN_TTAG /* T-Tag Tunnel. */,
+       ETH_TUNN_GRE /* GRE Tunnel. */,
+       ETH_TUNN_VXLAN /* VXLAN Tunnel. */,
+       MAX_ETH_TUNN_TYPE
+};
+
+/*
+ * The first tx bd of a given packet
+ */
+struct eth_tx_1st_bd {
+       struct regpair addr /* Single continuous buffer */;
+       __le16 nbytes /* Number of bytes in this BD. */;
+       struct eth_tx_data_1st_bd data /* Parsing information data. */;
+};
+
+/*
+ * The second tx bd of a given packet
+ */
+struct eth_tx_2nd_bd {
+       struct regpair addr /* Single continuous buffer */;
+       __le16 nbytes /* Number of bytes in this BD. */;
+       struct eth_tx_data_2nd_bd data /* Parsing information data. */;
+};
+
+/*
+ * The parsing information data for the third tx bd of a given packet.
+ */
+struct eth_tx_data_3rd_bd {
+       __le16 lso_mss /* For LSO packet - the MSS in bytes. */;
+       __le16 bitfields;
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK  0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK         0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT        4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK        0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT       8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK       0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT      9
+       u8 tunn_l4_hdr_start_offset_w;
+       u8 tunn_hdr_size_w;
+};
+
+/*
+ * The third tx bd of a given packet
+ */
+struct eth_tx_3rd_bd {
+       struct regpair addr /* Single continuous buffer */;
+       __le16 nbytes /* Number of bytes in this BD. */;
+       struct eth_tx_data_3rd_bd data /* Parsing information data. */;
+};
+
+/*
+ * Complementary information for the regular tx bd of a given packet.
+ */
+struct eth_tx_data_bd {
+       __le16 reserved0;
+       __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK  0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK   0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT  8
+#define ETH_TX_DATA_BD_RESERVED2_MASK  0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+       __le16 reserved3;
+};
+
+/*
+ * The common regular TX BD ring element
+ */
+struct eth_tx_bd {
+       struct regpair addr /* Single continuous buffer */;
+       __le16 nbytes /* Number of bytes in this BD. */;
+       struct eth_tx_data_bd data /* Complementary information. */;
+};
+
+union eth_tx_bd_types {
+       struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */;
+       struct eth_tx_2nd_bd second_bd /* The second tx bd of a given packet */
+         ;
+       struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */;
+       struct eth_tx_bd reg_bd /* The common non-special bd */;
+};
+
+/*
+ * Mstorm Queue Zone
+ */
+struct mstorm_eth_queue_zone {
+       struct eth_rx_prod_data rx_producers;
+       __le32 reserved[2];
+};
+
+/*
+ * Ustorm Queue Zone
+ */
+struct ustorm_eth_queue_zone {
+       struct coalescing_timeset int_coalescing_timeset
+           /* Rx interrupt coalescing TimeSet */;
+       __le16 reserved[3];
+};
+
+/*
+ * Ystorm Queue Zone
+ */
+struct ystorm_eth_queue_zone {
+       struct coalescing_timeset int_coalescing_timeset
+           /* Tx interrupt coalescing TimeSet */;
+       __le16 reserved[3];
+};
+
+/*
+ * ETH doorbell data
+ */
+struct eth_db_data {
+       u8 params;
+#define ETH_DB_DATA_DEST_MASK         0x3
+#define ETH_DB_DATA_DEST_SHIFT        0
+#define ETH_DB_DATA_AGG_CMD_MASK      0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT     2
+#define ETH_DB_DATA_BYPASS_EN_MASK    0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT   4
+#define ETH_DB_DATA_RESERVED_MASK     0x1
+#define ETH_DB_DATA_RESERVED_SHIFT    5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8 agg_flags;
+       __le16 bd_prod;
+};
+
+#endif /* __ETH_COMMON__ */
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
new file mode 100644 (file)
index 0000000..f74e506
--- /dev/null
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/****************************************************************************
+ *
+ * Name:        mcp_public.h
+ *
+ * Description: MCP public data
+ *
+ * Created:     13/01/2013 yanivr
+ *
+ ****************************************************************************/
+
+#ifndef MCP_PUBLIC_H
+#define MCP_PUBLIC_H
+
+#define VF_MAX_STATIC 192      /* In case of AH */
+
+#define MCP_GLOB_PATH_MAX      2
+#define MCP_PORT_MAX           2       /* Global */
+#define MCP_GLOB_PORT_MAX      4       /* Global */
+#define MCP_GLOB_FUNC_MAX      16      /* Global */
+
+/* Packed (offset, size) descriptor of a section in the MCP scratchpad.
+ * Both halves are expressed in DWORDS; the SECTION_* macros below convert
+ * to bytes by shifting left by 2.
+ */
+typedef u32 offsize_t;         /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT   0
+#define OFFSIZE_OFFSET_MASK    0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT     16
+#define OFFSIZE_SIZE_MASK      0xffff0000
+
+/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
+#define SECTION_OFFSET(_offsize) \
+((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* SECTION_SIZE is calculating the size in bytes out of offsize */
+#define SECTION_SIZE(_offsize) \
+(((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
+
+/* Absolute scratchpad address of array element idx of the section */
+#define SECTION_ADDR(_offsize, idx) \
+(MCP_REG_SCRATCH + SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx))
+
+/* Address of the offsize descriptor of _section within the public data */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+(_pub_base + offsetof(struct mcp_public_data, sections[_section]))
+
+/* PHY configuration requested by the driver (speed/pause/loopback). */
+struct pmm_phy_cfg {
+       u32 speed; /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
+#define PMM_SPEED_AUTONEG   0
+#define PMM_SPEED_SMARTLINQ  0x8
+
+       u32 pause;              /* bitmask */
+#define PMM_PAUSE_NONE         0x0
+#define PMM_PAUSE_AUTONEG      0x1
+#define PMM_PAUSE_RX           0x2
+#define PMM_PAUSE_TX           0x4
+
+       u32 adv_speed;          /* Default should be the speed_cap_mask */
+       u32 loopback_mode;
+#define PMM_LOOPBACK_NONE              0
+#define PMM_LOOPBACK_INT_PHY           1
+#define PMM_LOOPBACK_EXT_PHY           2
+#define PMM_LOOPBACK_EXT               3
+#define PMM_LOOPBACK_MAC               4
+#define PMM_LOOPBACK_CNIG_AH_ONLY_0123 5       /* Port to itself */
+#define PMM_LOOPBACK_CNIG_AH_ONLY_2301 6       /* Port to Port */
+
+       /* features */
+       u32 feature_config_flags;
+
+};
+
+/* Per-port multi-function configuration (outer-VLAN tag). */
+struct port_mf_cfg {
+       u32 dynamic_cfg;        /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK              0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT             0
+/* all-ones means "no outer-VLAN tag configured" */
+#define PORT_MF_CFG_OV_TAG_DEFAULT         PORT_MF_CFG_OV_TAG_MASK
+
+       u32 reserved[1];
+};
+
+/* MAC statistics block, filled by the MFW.
+ * DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ * (the hex value in each comment is the HW stat register id, the Offset
+ * is the byte position inside this struct).
+ */
+struct pmm_stats {
+       u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter */
+       u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter */
+       u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter */
+       u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter */
+       u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter */
+       u64 r1518; /* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
+       u64 r1522; /* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged  */
+       u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter */
+       u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter */
+       u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter */
+       u64 r16383; /* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame ctr */
+       u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter */
+       u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter */
+       u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter */
+       u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter */
+       u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter */
+       u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
+       u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter */
+       u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+       u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+       u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
+       u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+       u64 t127; /* 0x41 (Offset 0xb0 ) TX 65 to 127 byte frame counter */
+       u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter */
+       u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter */
+       u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter */
+       u64 t1518; /* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
+       u64 t2047; /* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
+       u64 t4095; /* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
+       u64 t9216; /* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
+       u64 t16383; /* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame ctr */
+       u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+       u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+       u64 tlpiec; /* 0x6C (Offset 0x108) Transmit Logical Type LLFC */
+       u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+       u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+       u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+       u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+       u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+       u64 rxpok; /* 0x22 (Offset 0x138) RX good frame */
+       u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+       u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+       u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+       u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+       u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+};
+
+/* BRB (buffer block) drop statistics, one entry per traffic class. */
+struct brb_stats {
+       u64 brb_truncate[8];
+       u64 brb_discard[8];
+};
+
+/* Aggregate per-port statistics: BRB drops plus MAC counters. */
+struct port_stats {
+       struct brb_stats brb;
+       struct pmm_stats pmm;
+};
+
+/*-----+-----------------------------------------------------------------------
+ * Chip | Number and       | Ports in| Ports in|2 PHY-s |# of ports|# of engines
+ *      | rate of physical | team #1 | team #2 |are used|per path  | (paths)
+ *      | ports            |         |         |        |          |
+ *======+==================+=========+=========+========+======================
+ * BB   | 1x100G           | This is a special mode with 2 HW functions
+ * BB   | 2x10/20Gbps      | 0,1     | NA      |  No    | 1        | 1
+ * BB   | 2x40 Gbps        | 0,1     | NA      |  Yes   | 1        | 1
+ * BB   | 2x50Gbps         | 0,1     | NA      |  No    | 1        | 1
+ * BB   | 4x10Gbps         | 0,2     | 1,3     |  No    | 1/2      | 1,2
+ * BB   | 4x10Gbps         | 0,1     | 2,3     |  No    | 1/2      | 1,2
+ * BB   | 4x10Gbps         | 0,3     | 1,2     |  No    | 1/2      | 1,2
+ * BB   | 4x10Gbps         | 0,1,2,3 | NA      |  No    | 1        | 1
+ * AH   | 2x10/20Gbps      | 0,1     | NA      |  NA    | 1        | NA
+ * AH   | 4x10Gbps         | 0,1     | 2,3     |  NA    | 2        | NA
+ * AH   | 4x10Gbps         | 0,2     | 1,3     |  NA    | 2        | NA
+ * AH   | 4x10Gbps         | 0,3     | 1,2     |  NA    | 2        | NA
+ * AH   | 4x10Gbps         | 0,1,2,3 | NA      |  NA    | 1        | NA
+ *======+==================+=========+=========+========+=======================
+ */
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+/* Couple-mode-teaming state, one flag byte per global port. */
+struct couple_mode_teaming {
+       u8 port_cmt[MCP_GLOB_PORT_MAX];
+/* bit 0 - port is a member of a team */
+#define PORT_CMT_IN_TEAM            (1 << 0)
+
+/* bit 1 - active/inactive role within the team */
+#define PORT_CMT_PORT_ROLE          (1 << 1)
+#define PORT_CMT_PORT_INACTIVE      (0 << 1)
+#define PORT_CMT_PORT_ACTIVE        (1 << 1)
+
+/* bit 2 - which of the two teams the port belongs to */
+#define PORT_CMT_TEAM_MASK          (1 << 2)
+#define PORT_CMT_TEAM0              (0 << 2)
+#define PORT_CMT_TEAM1              (1 << 2)
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      G L O B A L   */
+/*                                    */
+/**************************************/
+/* Device-global public data shared by the MFW with all functions. */
+struct public_global {
+       u32 max_path; /* 32bit is wasteful, but this will be used often */
+       u32 max_ports; /* (Global) 32bit is wasteful, this will be used often */
+#define MODE_1P        1 /* TBD - NEED TO THINK OF A BETTER NAME */
+#define MODE_2P        2
+#define MODE_3P        3
+#define MODE_4P        4
+       u32 debug_mb_offset;
+       u32 phymod_dbg_mb_offset;
+       struct couple_mode_teaming cmt;
+       s32 internal_temperature;
+       u32 mfw_ver;
+       u32 running_bundle_id;
+       s32 external_temperature;
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      P A T H       */
+/*                                    */
+/**************************************/
+
+/****************************************************************************
+ * Shared Memory 2 Region                                                   *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way:                   */
+/* 8 bit:  PF ack                                                           */
+/* 128 bit: VF ack                                                          */
+/* 8 bit:  ios_dis_ack                                                      */
+/* In order to maintain endianness in the mailbox HSI, we want to keep      */
+/* using u32. The FW must have the VF right after the PF since this is how  */
+/* it accesses arrays (it expects the VF to always reside after the PF,     */
+/* which makes the calculation much easier for it).                         */
+/* In order to answer both limitations, and keep the struct small, the code */
+/* will abuse the structure defined here to achieve the actual partition    */
+/* above.                                                                   */
+/****************************************************************************/
+/* FLR (function-level-reset) acknowledge mailbox; see the partition
+ * description in the comment block above. accum_ack is a packed bitmap:
+ * PF bits, then VF bits, then IOV-disable bits, addressed via the
+ * BASE (bit offset) / SHIFT (log2 dword index) pairs below.
+ */
+struct fw_flr_mb {
+       u32 aggint;
+       u32 opgen_addr;
+       u32 accum_ack;          /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE      0
+#define ACCUM_ACK_PF_SHIFT     0
+
+#define ACCUM_ACK_VF_BASE      8
+#define ACCUM_ACK_VF_SHIFT     3
+
+#define ACCUM_ACK_IOV_DIS_BASE 256
+#define ACCUM_ACK_IOV_DIS_SHIFT        8
+
+};
+
+/* Per-path (engine) public data. */
+struct public_path {
+       struct fw_flr_mb flr_mb;
+       /*
+        * mcp_vf_disabled is set by the MCP to indicate the driver about VFs
+        * which were disabled/flred
+        */
+       u32 mcp_vf_disabled[VF_MAX_STATIC / 32];        /* 0x003c */
+
+       u32 process_kill;
+       /* Reset on mcp reset, and incremented for every process kill event. */
+#define PROCESS_KILL_COUNTER_MASK              0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT             0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK         0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT                16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      P O R T       */
+/*                                    */
+/**************************************/
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+/* One FC NPIV entry: world-wide port name + world-wide node name. */
+struct dci_npiv_settings {
+       u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+       u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+/* Header of the FC NPIV table stored in NVRAM. */
+struct dci_fc_npiv_cfg {
+       /* hdr used internally by the MFW */
+       u32 hdr;
+       u32 num_of_npiv; /* number of valid entries in the settings array */
+};
+
+#define MAX_NUMBER_NPIV 64
+/* Full FC NPIV table: header followed by up to MAX_NUMBER_NPIV entries. */
+struct dci_fc_npiv_tbl {
+       struct dci_fc_npiv_cfg fc_npiv_cfg;
+       struct dci_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+/****************************************************************************
+ * Driver <-> FW Mailbox                                                    *
+ ****************************************************************************/
+
+/* Per-port public data published by the MFW in the scratchpad.
+ * Field order and sizes are part of the MFW HSI - do not reorder.
+ */
+struct public_port {
+       u32 validity_map;       /* 0x0 (4*2 = 0x8) */
+
+       /* validity bits */
+#define MCP_VALIDITY_PCI_CFG                    0x00100000
+#define MCP_VALIDITY_MB                         0x00200000
+#define MCP_VALIDITY_DEV_INFO                   0x00400000
+#define MCP_VALIDITY_RESERVED                   0x00000007
+
+       /* One licensing bit should be set */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK     0x00000038 /* yaniv - tbd  */
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT    0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT  0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT       0x00000020
+
+       /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN         0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK            0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI            0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE            0x000001c0
+
+       u32 link_status;
+#define LINK_STATUS_LINK_UP                    0x00000001
+       /* bits 1..4 - speed/duplex encoded as an enumeration */
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK                      0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD           (1 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD           (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G                       (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G                       (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G                       (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G                       (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G                      (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G                       (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED                     0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE                    0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED                    0x00000080
+
+#define LINK_STATUS_PFC_ENABLED                                0x00000100
+       /* link-partner advertised capabilities */
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE       0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE       0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE           0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE           0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE           0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE           0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE          0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE           0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK     0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE     (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE       (1 << 18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE      (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE                    (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT                               0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED                    0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED                    0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT               0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT                 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT                0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ                                0x04000000
+
+       u32 link_status1;
+       u32 ext_phy_fw_version;
+       u32 drv_phy_cfg_addr;   /* Points to pmm_phy_cfg (For READ-ONLY) */
+
+       u32 port_stx;
+
+       u32 stat_nig_timer;
+
+       struct port_mf_cfg port_mf_config;
+       struct port_stats stats;
+
+       u32 media_type;
+#define        MEDIA_UNSPECIFIED               0x0
+#define        MEDIA_SFPP_10G_FIBER    0x1
+#define        MEDIA_XFP_FIBER                 0x2
+#define        MEDIA_DA_TWINAX                 0x3
+#define        MEDIA_BASE_T                    0x4
+#define MEDIA_SFP_1G_FIBER             0x5
+#define MEDIA_MODULE_FIBER             0x6
+#define        MEDIA_KR                                0xf0
+#define        MEDIA_NOT_PRESENT               0xff
+
+       u32 lfa_status; /* LFA = Link Flap Avoidance */
+#define LFA_LINK_FLAP_REASON_OFFSET            0
+#define LFA_LINK_FLAP_REASON_MASK              0x000000ff
+#define LFA_NO_REASON                                  (0 << 0)
+#define LFA_LINK_DOWN                                  (1 << 0)
+#define LFA_FORCE_INIT                                 (1 << 1)
+#define LFA_LOOPBACK_MISMATCH                          (1 << 2)
+#define LFA_SPEED_MISMATCH                             (1 << 3)
+#define LFA_FLOW_CTRL_MISMATCH                         (1 << 4)
+#define LFA_ADV_SPEED_MISMATCH                         (1 << 5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET       8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK         0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET                 16
+#define LINK_FLAP_COUNT_MASK                   0x00ff0000
+
+       u32 link_change_count;
+
+       /* FC_NPIV table offset & size in NVRAM value of 0 means not present */
+       u32 fc_npiv_nvram_tbl_addr;
+       u32 fc_npiv_nvram_tbl_size;
+       u32 transceiver_data;
+#define PMM_TRANSCEIVER_STATE_MASK             0x000000FF
+#define PMM_TRANSCEIVER_STATE_SHIFT            0x00000000
+#define PMM_TRANSCEIVER_STATE_UNPLUGGED                0x00000000
+#define PMM_TRANSCEIVER_STATE_PRESENT          0x00000001
+#define PMM_TRANSCEIVER_STATE_VALID            0x00000003
+#define PMM_TRANSCEIVER_STATE_UPDATING         0x00000008
+#define PMM_TRANSCEIVER_TYPE_MASK              0x0000FF00
+#define PMM_TRANSCEIVER_TYPE_SHIFT             0x00000008
+#define PMM_TRANSCEIVER_TYPE_NONE              0x00000000
+#define PMM_TRANSCEIVER_TYPE_UNKNOWN           0x000000FF
+#define PMM_TRANSCEIVER_TYPE_1G_PCC    0x01    /* 1G Passive copper cable */
+#define PMM_TRANSCEIVER_TYPE_1G_ACC    0x02    /* 1G Active copper cable  */
+#define PMM_TRANSCEIVER_TYPE_1G_LX                             0x03
+#define PMM_TRANSCEIVER_TYPE_1G_SX                             0x04
+#define PMM_TRANSCEIVER_TYPE_10G_SR                            0x05
+#define PMM_TRANSCEIVER_TYPE_10G_LR                            0x06
+#define PMM_TRANSCEIVER_TYPE_10G_LRM                   0x07
+#define PMM_TRANSCEIVER_TYPE_10G_ER                            0x08
+#define PMM_TRANSCEIVER_TYPE_10G_PCC   0x09    /* 10G Passive copper cable */
+#define PMM_TRANSCEIVER_TYPE_10G_ACC   0x0a    /* 10G Active copper cable  */
+#define PMM_TRANSCEIVER_TYPE_XLPPI                             0x0b
+#define PMM_TRANSCEIVER_TYPE_40G_LR4                   0x0c
+#define PMM_TRANSCEIVER_TYPE_40G_SR4                   0x0d
+#define PMM_TRANSCEIVER_TYPE_40G_CR4                   0x0e
+#define PMM_TRANSCEIVER_TYPE_100G_AOC  0x0f    /* Active optical cable */
+#define PMM_TRANSCEIVER_TYPE_100G_SR4                  0x10
+#define PMM_TRANSCEIVER_TYPE_100G_LR4                  0x11
+#define PMM_TRANSCEIVER_TYPE_100G_ER4                  0x12
+#define PMM_TRANSCEIVER_TYPE_100G_ACC  0x13    /* Active copper cable */
+#define PMM_TRANSCEIVER_TYPE_100G_CR4                  0x14
+#define PMM_TRANSCEIVER_TYPE_4x10G_SR                  0x15
+#define PMM_TRANSCEIVER_TYPE_25G_PCC_S 0x16
+#define PMM_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define PMM_TRANSCEIVER_TYPE_25G_PCC_M 0x18
+#define PMM_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+#define PMM_TRANSCEIVER_TYPE_25G_PCC_L 0x1a
+#define PMM_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define PMM_TRANSCEIVER_TYPE_25G_SR                            0x1c
+#define PMM_TRANSCEIVER_TYPE_25G_LR                            0x1d
+#define PMM_TRANSCEIVER_TYPE_25G_AOC                   0x1e
+
+/* NOTE(review): 0x1d/0x1e below duplicate 25G_LR/25G_AOC above -
+ * values come from the MFW HSI, confirm against the firmware definition.
+ */
+#define PMM_TRANSCEIVER_TYPE_4x10G                                     0x1d
+#define PMM_TRANSCEIVER_TYPE_4x25G_CR                                  0x1e
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40GR                       0x30
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR                     0x31
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR                     0x32
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR                    0x33
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR                    0x34
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR                    0x35
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC                   0x36
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      F U N C       */
+/*                                    */
+/**************************************/
+
+/* Per-PCI-function public data shared between driver and MFW. */
+struct public_func {
+       u32 dpdk_rsvd1[2];
+
+       /* MTU size per function is needed for the OV feature */
+       u32 mtu_size;
+       /* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+       /* For PCP values 0-3 use the map lower */
+       /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+        * 0x0000FF00 - PCP 2, 0x000000FF PCP 3
+        */
+       u32 c2s_pcp_map_lower;
+       /* For PCP values 4-7 use the map upper */
+       /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+        * 0x0000FF00 - PCP 6, 0x000000FF PCP 7
+        */
+       u32 c2s_pcp_map_upper;
+
+       /* For PCP default value get the MSB byte of the map default */
+       u32 c2s_pcp_map_default;
+
+       u32 reserved[4];
+
+       /* replace old mf_cfg */
+       u32 config;
+       /* E/R/I/D */
+       /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE                   0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING          0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT    0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK               0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT              4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET           0x00000000
+#define FUNC_MF_CFG_PROTOCOL_MAX               0x00000000
+
+       /* MINBW, MAXBW */
+       /* value range - 0..100, increments in 1 %  */
+#define FUNC_MF_CFG_MIN_BW_MASK                 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT                8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT              0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK                 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT                16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT              0x00640000
+
+       u32 status;
+#define FUNC_STATUS_VLINK_DOWN                 0x00000001
+
+       u32 mac_upper;          /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK               0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT              0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT            FUNC_MF_CFG_UPPERMAC_MASK
+       u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT            0xffffffff
+
+       u32 dpdk_rsvd2[4];
+
+       u32 ovlan_stag;         /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK              0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT             0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT           FUNC_MF_CFG_OV_STAG_MASK
+
+       u32 pf_allocation;      /* vf per pf */
+
+       u32 preserve_data;      /* Will be used by CCM */
+
+       u32 driver_last_activity_ts;
+
+       /*
+        * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+        * VFs
+        */
+       u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];    /* 0x0044 */
+
+       u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK       0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT      0
+
+#define DRV_ID_MCP_HSI_VER_MASK                0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT       16
+#define DRV_ID_MCP_HSI_VER_CURRENT     (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+
+       /* OS type the driver reports to the MFW (bits 24..30) */
+#define DRV_ID_DRV_TYPE_MASK           0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT          24
+#define DRV_ID_DRV_TYPE_UNKNOWN                (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX          (1 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS                (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG           (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT                (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS                (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE         (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD                (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX            (8 << DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK                0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT       31
+#define DRV_ID_DRV_INIT_HW_FLAG                (1 << DRV_ID_DRV_INIT_HW_SHIFT)
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C       M B          */
+/*                                    */
+/**************************************/
+/* This is the only section that the driver can write to. */
+/* Each driver request to set feature parameters
+ * will be done using a different command, which will be linked
+ * to a specific data structure from the union below.
+ * For huge structures, the common blank structure should be used.
+ */
+
+/* 48-bit MAC address split across two dwords for the mailbox. */
+struct mcp_mac {
+       u32 mac_upper;          /* Upper 16 bits are always zeroes */
+       u32 mac_lower;
+};
+
+/* Generic 64-bit value passed through the mailbox as two dwords. */
+struct mcp_val64 {
+       u32 lo;
+       u32 hi;
+};
+
+/* NVM file attributes returned for DRV_MSG_CODE_NVM_GET_FILE_ATT. */
+struct mcp_file_att {
+       u32 nvm_start_addr;
+       u32 len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+/* Driver version reported to the MFW (DRV_MSG_CODE_SET_VERSION).
+ * The name field is shortened by 4 bytes to make room for the version dword.
+ */
+struct drv_version_stc {
+       u32 version;
+       u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/* statistics for ncsi */
+struct lan_stats_stc {
+       u64 ucast_rx_pkts;
+       u64 ucast_tx_pkts;
+       u32 fcs_err;
+       u32 rserved; /* sic - misspelled in the HSI; do not rename (ABI) */
+};
+
+/* OCBB/OCSD host buffer addresses passed via DRV_MSG_CODE_OCBB_DATA. */
+struct ocbb_data_stc {
+       u32 ocbb_host_addr;
+       u32 ocsd_host_addr;
+       u32 ocsd_req_update_interval;
+};
+
+/* Payload union for the driver mailbox; the active member is selected by
+ * the DRV_MSG_CODE_* command (noted per member where known).
+ */
+union drv_union_data {
+       u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];        /* LOAD_REQ */
+       struct mcp_mac wol_mac; /* UNLOAD_DONE */
+
+       struct pmm_phy_cfg drv_phy_cfg; /* INIT_PHY / LINK_RESET */
+
+       struct mcp_val64 val64; /* For PHY / AVS commands */
+
+       u8 raw_data[MCP_DRV_NVM_BUF_LEN]; /* NVM read/write buffer */
+
+       struct mcp_file_att file_att; /* NVM_GET_FILE_ATT */
+
+       u32 ack_vf_disabled[VF_MAX_STATIC / 32]; /* VF_DISABLED_DONE */
+
+       struct drv_version_stc drv_version; /* SET_VERSION */
+
+       struct lan_stats_stc lan_stats; /* GET_STATS */
+       u32 dpdk_rsvd[3];
+       struct ocbb_data_stc ocbb_info; /* OCBB_DATA */
+
+       /* ... */
+};
+
+struct public_drv_mb {
+       u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK                       0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ                   0x10000000
+#define DRV_MSG_CODE_LOAD_DONE                  0x11000000
+#define DRV_MSG_CODE_INIT_HW                    0x12000000
+#define DRV_MSG_CODE_UNLOAD_REQ                        0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE                0x21000000
+#define DRV_MSG_CODE_INIT_PHY                  0x22000000
+       /* Params - FORCE - Reinitialize the link regardless of LFA */
+       /*        - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET                        0x23000000
+
+       /* OneView feature driver HSI */
+#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG                0x26000000
+#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM         0x27000000
+#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS   0x28000000
+#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER    0x29000000
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE    0x31000000
+#define DRV_MSG_CODE_BW_UPDATE_ACK             0x32000000
+#define DRV_MSG_CODE_OV_UPDATE_MTU             0x33000000
+
+#define DRV_MSG_CODE_NIG_DRAIN                 0x30000000
+
+#define DRV_MSG_CODE_INITIATE_FLR               0x02000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX                0xc0010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN                0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA         0x00020000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT          0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM            0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM           0x00060000
+#define DRV_MSG_CODE_NVM_DEL_FILE              0x00080000
+#define DRV_MSG_CODE_MCP_RESET                 0x00090000
+#define DRV_MSG_CODE_SET_SECURE_MODE           0x000a0000
+#define DRV_MSG_CODE_PHY_RAW_READ              0x000b0000
+#define DRV_MSG_CODE_PHY_RAW_WRITE             0x000c0000
+#define DRV_MSG_CODE_PHY_CORE_READ             0x000d0000
+#define DRV_MSG_CODE_PHY_CORE_WRITE            0x000e0000
+#define DRV_MSG_CODE_SET_VERSION               0x000f0000
+#define DRV_MSG_CODE_MCP_HALT                  0x00100000
+#define DRV_MSG_CODE_PMD_DIAG_DUMP             0x00140000
+#define DRV_MSG_CODE_PMD_DIAG_EYE              0x00150000
+#define DRV_MSG_CODE_TRANSCEIVER_READ          0x00160000
+#define DRV_MSG_CODE_TRANSCEIVER_WRITE         0x00170000
+
+#define DRV_MSG_CODE_SET_VMAC                   0x00110000
+#define DRV_MSG_CODE_GET_VMAC                   0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_MAC              1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN             2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN             3
+
+#define DRV_MSG_CODE_GET_STATS                  0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN             1
+
+#define DRV_MSG_CODE_OCBB_DATA                 0x00180000
+#define DRV_MSG_CODE_SET_BW                    0x00190000
+#define DRV_MSG_CODE_MASK_PARITIES             0x001a0000
+#define DRV_MSG_CODE_INDUCE_FAILURE            0x001b0000
+#define DRV_MSG_FAN_FAILURE_TYPE               (1 << 0)
+#define DRV_MSG_TEMPERATURE_FAILURE_TYPE       (1 << 1)
+
+#define DRV_MSG_CODE_GPIO_READ                 0x001c0000
+#define DRV_MSG_CODE_GPIO_WRITE                        0x001d0000
+
+#define DRV_MSG_CODE_SET_LED_MODE              0x00200000
+#define DRV_MSG_CODE_EMPTY_MB                  0x00220000
+
+#define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
+
+       u32 drv_mb_param;
+       /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN         0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP            0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED        0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED         0x00000003
+
+       /* UNLOAD_DONE_params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER        0x00000001
+
+       /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE            0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE                0x00000002
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK  0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW    0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE  0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT          0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK           0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT             24
+#define DRV_MB_PARAM_NVM_LEN_MASK              0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT            0
+#define DRV_MB_PARAM_PHY_ADDR_MASK             0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT            16
+#define DRV_MB_PARAM_PHY_LANE_MASK             0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT     29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK      0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT            30
+#define DRV_MB_PARAM_PHY_PORT_MASK             0xc0000000
+
+#define DRV_MB_PARAM_PHYMOD_LANE_SHIFT         0
+#define DRV_MB_PARAM_PHYMOD_LANE_MASK          0x000000FF
+#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT         8
+#define DRV_MB_PARAM_PHYMOD_SIZE_MASK          0x000FFF00
+       /* configure vf MSIX params */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT   0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK    0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT  8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK   0x0000FF00
+
+       /* OneView configuration parameters */
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT         0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK          0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE          0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS                    1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC   2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER         3
+#define DRV_MB_PARAM_OV_CURR_CFG_VC_CLP                4
+#define DRV_MB_PARAM_OV_CURR_CFG_CNU           5
+#define DRV_MB_PARAM_OV_CURR_CFG_DCI           6
+#define DRV_MB_PARAM_OV_CURR_CFG_HII           7
+
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT                 0
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK                  0x000000FF
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE                  (1 << 0)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_TRARGET_FOUND                 (1 << 2)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_LOGGED_INTO_TGT               (1 << 4)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_IMG_DOWNLOADED                        (1 << 5)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF                    (1 << 6)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED                          0
+
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT              0
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK               0x000000FF
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT             0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK                      0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK                0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK                0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK                0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK         0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT              0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK               0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN            0x1
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING            0x3
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED   0x4
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE             0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT         0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK          0xFFFFFFFF
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER         0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON           0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF          0x2
+
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT            0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK             0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT            2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK             0x000000FC
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT     8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK      0x0000FF00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT          16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK           0xFFFF0000
+
+#define DRV_MB_PARAM_GPIO_NUMBER_SHIFT         0
+#define DRV_MB_PARAM_GPIO_NUMBER_MASK          0x0000FFFF
+#define DRV_MB_PARAM_GPIO_VALUE_SHIFT          16
+#define DRV_MB_PARAM_GPIO_VALUE_MASK           0xFFFF0000
+
+       u32 fw_mb_header;
+#define FW_MSG_CODE_MASK                        0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE            0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT               0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION           0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA        0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI        0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG       0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE               0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE           0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT             0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION         0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE             0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE              0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS  0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE            0x23000000
+#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE        0x26000000
+#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE         0x27000000
+#define FW_MSG_CODE_UPDATE_BOOT_PROGRESS_DONE   0x28000000
+#define FW_MSG_CODE_UPDATE_STORM_FW_VER_DONE    0x29000000
+#define FW_MSG_CODE_UPDATE_DRIVER_STATE_DONE    0x31000000
+#define FW_MSG_CODE_DRV_MSG_CODE_BW_UPDATE_DONE 0x32000000
+#define FW_MSG_CODE_DRV_MSG_CODE_MTU_SIZE_DONE  0x33000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE              0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE        0xb0010000
+#define FW_MSG_CODE_FLR_ACK                     0x02000000
+#define FW_MSG_CODE_FLR_NACK                    0x02100000
+#define FW_MSG_CODE_SET_DRIVER_DONE            0x02200000
+#define FW_MSG_CODE_SET_VMAC_SUCCESS            0x02300000
+#define FW_MSG_CODE_SET_VMAC_FAIL               0x02400000
+
+#define FW_MSG_CODE_NVM_OK                     0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE           0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED      0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE        0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND      0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND         0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC    0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR    0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE    0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND         0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED       0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED       0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET             0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE          0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY         0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE           0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK     0x00400000
+#define FW_MSG_CODE_MCP_RESET_REJECT           0x00600000
+#define FW_MSG_CODE_PHY_OK                     0x00110000
+#define FW_MSG_CODE_PHY_ERROR                  0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR      0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK         0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR                0x00150000
+#define FW_MSG_CODE_OK                         0x00160000
+#define FW_MSG_CODE_LED_MODE_INVALID           0x00170000
+#define FW_MSG_CODE_PHY_DIAG_OK           0x00160000
+#define FW_MSG_CODE_PHY_DIAG_ERROR        0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE    0x00040000
+#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE    0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000
+#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE    0x000c0000
+#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH    0x00100000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK           0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR        0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT            0x00020000
+#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE                0x000f0000
+#define FW_MSG_CODE_GPIO_OK           0x00160000
+#define FW_MSG_CODE_GPIO_DIRECTION_ERR        0x00170000
+#define FW_MSG_CODE_GPIO_CTRL_ERR              0x00020000
+#define FW_MSG_CODE_GPIO_INVALID               0x000f0000
+#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
+
+#define FW_MSG_SEQ_NUMBER_MASK                  0x0000ffff
+
+       u32 fw_mb_param;
+
+       u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK                      0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK              0xffff0000
+       /*
+        * The system time is in the format of
+        * (year-2001)*12*32 + month*32 + day.
+        */
+#define DRV_PULSE_ALWAYS_ALIVE                  0x00008000
+       /*
+        * Indicate to the firmware not to go into the
+        * OS-absent when it is not getting driver pulse.
+        * This is used for debugging as well for PXE(MBA).
+        */
+
+       u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK                      0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE                  0x00008000
+       /* Indicates to the driver not to assert due to lack
+        * of MCP response
+        */
+#define MCP_EVENT_MASK                          0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ        0x00010000
+
+       union drv_union_data union_data;
+};
+
+/* MFW - DRV MB */
+/**********************************************************************
+ * Description
+ *   Incremental Aggregative
+ *   8-bit MFW counter per message
+ *   8-bit ack-counter per message
+ * Capabilities
+ *   Provides up to 256 aggregative message per type
+ *   Provides 4 message types in dword
+ *   Message type pointers to byte offset
+ *   Backward Compatibility by using sizeof for the counters.
+ *   No lock required for 32-bit messages
+ * Limitations:
+ * In case of messages greater than 32bit, a dedicated mechanism(e.g lock)
+ * is required to prevent data corruption.
+ **********************************************************************/
+/* Message types posted by the MFW toward the driver.  Each enumerator is
+ * the byte index of that message's 8-bit counter within
+ * public_mfw_mb.msg[] / public_mfw_mb.ack[] (see the MFW_DRV_MSG_* helper
+ * macros below).
+ */
+enum MFW_DRV_MSG_TYPE {
+       MFW_DRV_MSG_LINK_CHANGE,
+       MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+       MFW_DRV_MSG_VF_DISABLED,
+       MFW_DRV_MSG_ERROR_RECOVERY,
+       MFW_DRV_MSG_BW_UPDATE,
+       MFW_DRV_MSG_S_TAG_UPDATE,
+       MFW_DRV_MSG_GET_LAN_STATS,
+       MFW_DRV_MSG_GET_FCOE_STATS,
+       MFW_DRV_MSG_GET_ISCSI_STATS,
+       MFW_DRV_MSG_GET_RDMA_STATS,
+       MFW_DRV_MSG_FAILURE_DETECTED,
+       MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+       MFW_DRV_MSG_MAX         /* Number of message types - keep last */
+};
+
+/* Helpers for the per-message 8-bit counters packed four to a dword:
+ * MAX_DWORDS - number of u32 words needed to hold 'msgs' counters,
+ * DWORD      - index of the u32 containing a message's counter,
+ * OFFSET     - bit offset of the counter within that u32,
+ * MASK       - mask isolating the counter within that u32.
+ *
+ * Arguments are fully parenthesized so expression arguments expand
+ * correctly, and MASK uses an unsigned constant: with a plain 0xff,
+ * 0xff << 24 (msg_id % 4 == 3) overflows a signed int, which is
+ * undefined behavior per C11 6.5.7.
+ */
+#define MFW_DRV_MSG_MAX_DWORDS(msgs)   ((((msgs) - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id)      ((msg_id) >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id)     (((msg_id) & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id)       (0xffU << MFW_DRV_MSG_OFFSET(msg_id))
+
+/* DRV_ACK_MSG bumps the driver's 8-bit ack counter for 'msg_id';
+ * MFW_DRV_UPDATE bumps the MFW-side counter in the given function's
+ * mailbox.  On big-endian hosts the byte order matches the MFW layout;
+ * on little-endian hosts the byte index within each dword is mirrored
+ * via ((msg_id & ~3) | (~msg_id & 3)).
+ *
+ * Fix: the original expansions contained a stray ';' inside the
+ * parentheses - ((...)++;) - which is not a valid C expression and
+ * would fail to compile if the macro were ever expanded.  Arguments
+ * are also parenthesized against operator-precedence surprises.
+ */
+#ifdef BIG_ENDIAN              /* Like MFW */
+#define DRV_ACK_MSG(msg_p, msg_id) \
+       ((u8)(((u8 *)(msg_p))[(msg_id)]++))
+#else
+#define DRV_ACK_MSG(msg_p, msg_id) \
+       ((u8)(((u8 *)(msg_p))[(((msg_id) & ~3) | ((~(msg_id)) & 3))]++))
+#endif
+
+#define MFW_DRV_UPDATE(shmem_func, msg_id) \
+       ((u8)(((u8 *)(MFW_MB_P(shmem_func)->msg))[(msg_id)]++))
+
+/* MFW -> driver mailbox: per-message produce (msg) and driver ack (ack)
+ * counters, one byte per message type, packed four to a dword.
+ */
+struct public_mfw_mb {
+       u32 sup_msgs;           /* Assigned with MFW_DRV_MSG_MAX */
+       u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+       u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C       D A T A      */
+/*                                    */
+/**************************************/
+/* Index of each logical section within mcp_public_data.sections[];
+ * see struct mcp_public_data below for the matching storage.
+ */
+enum public_sections {
+       PUBLIC_DRV_MB,          /* Points to the first drv_mb of path0 */
+       PUBLIC_MFW_MB,          /* Points to the first mfw_mb of path0 */
+       PUBLIC_GLOBAL,          /* Points to struct public_global */
+       PUBLIC_PATH,            /* Points to the first public_path */
+       PUBLIC_PORT,            /* Points to the first public_port */
+       PUBLIC_FUNC,            /* Points to the first public_func */
+       PUBLIC_MAX_SECTIONS     /* Number of sections - keep last */
+};
+
+/* Driver version information: numeric version plus a printable name. */
+struct drv_ver_info_stc {
+       u32 ver;
+       u8 name[32];            /* presumably NUL-terminated driver name - TODO confirm */
+};
+
+/* Runtime data needs about 1/2K. We use 2K to be on the safe side.
+ * Please make sure data does not exceed this size.
+ */
+#define NUM_RUNTIME_DWORDS 16
+/* HW-init runtime values: init_hw_data holds up to 16 * 32 = 512 dwords;
+ * init_hw_bitmask holds 16 * 32 = 512 bits - presumably one valid bit
+ * per data entry (sizes match) - TODO confirm.
+ */
+struct drv_init_hw_stc {
+       u32 init_hw_bitmask[NUM_RUNTIME_DWORDS];
+       u32 init_hw_data[NUM_RUNTIME_DWORDS * 32];
+};
+
+/* Top-level layout of the MCP "public" shared data region. */
+struct mcp_public_data {
+       /* The sections field is an array of offsets, one entry per
+        * value of enum public_sections.
+        */
+       u32 num_sections;
+       offsize_t sections[PUBLIC_MAX_SECTIONS];
+       struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+       struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+       struct public_global global;
+       struct public_path path[MCP_GLOB_PATH_MAX];
+       struct public_port port[MCP_GLOB_PORT_MAX];
+       struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+/* I2C parameters for transceiver module access.  NOTE(review): 0xa0 is
+ * the conventional SFP/QSFP EEPROM address - confirm against board docs.
+ */
+#define I2C_TRANSCEIVER_ADDR   0xa0
+#define MAX_I2C_TRANSACTION_SIZE       16
+#define MAX_I2C_TRANSCEIVER_PAGE_SIZE  256
+
+#endif /* MCP_PUBLIC_H */
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
new file mode 100644 (file)
index 0000000..907994b
--- /dev/null
@@ -0,0 +1,913 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/****************************************************************************
+ *
+ * Name:        nvm_cfg.h
+ *
+ * Description: NVM config file - Generated file from nvm cfg excel.
+ *              DO NOT MODIFY !!!
+ *
+ * Created:     1/14/2016
+ *
+ ****************************************************************************/
+
+#ifndef NVM_CFG_H
+#define NVM_CFG_H
+
+/* MAC address split across two dwords; only the low 16 bits of
+ * mac_addr_hi are used (see NVM_CFG_MAC_ADDRESS_HI_MASK).
+ */
+struct nvm_cfg_mac_address {
+       u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK                             0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET                           0
+       u32 mac_addr_lo;
+};
+
+/******************************************
+ * nvm_cfg1 structs
+ ******************************************/
+struct nvm_cfg1_glob {
+       u32 generic_cont0;      /* 0x0 */
+#define NVM_CFG1_GLOB_BOARD_SWAP_MASK                           0x0000000F
+#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET                         0
+#define NVM_CFG1_GLOB_BOARD_SWAP_NONE                           0x0
+#define NVM_CFG1_GLOB_BOARD_SWAP_PATH                           0x1
+#define NVM_CFG1_GLOB_BOARD_SWAP_PORT                           0x2
+#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH                           0x3
+#define NVM_CFG1_GLOB_MF_MODE_MASK                              0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET                            4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED                        0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT                           0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4                             0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0                           0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5                           0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0                           0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD                                0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP                               0x7
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK              0x00001000
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET            12
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED          0x0
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED           0x1
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK                       0x001FE000
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET                     13
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK                      0x1FE00000
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET                    21
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK                         0x20000000
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET                       29
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED                     0x0
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED                      0x1
+#define NVM_CFG1_GLOB_ENABLE_ATC_MASK                           0x40000000
+#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET                         30
+#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED                       0x0
+#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED                        0x1
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK                       0x80000000
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET                     31
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED                   0x0
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED                    0x1
+       u32 engineering_change[3];      /* 0x4 */
+       u32 manufacturing_id;   /* 0x10 */
+       u32 serial_number[4];   /* 0x14 */
+       u32 pcie_cfg;           /* 0x24 */
+#define NVM_CFG1_GLOB_PCI_GEN_MASK                              0x00000003
+#define NVM_CFG1_GLOB_PCI_GEN_OFFSET                            0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1                          0x0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2                          0x1
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3                          0x2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK                   0x00000004
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET                 2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED               0x0
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED                0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK                         0x00000018
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET                       3
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED               0x0
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED                  0x2
+#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK     0x00000020
+#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET   5
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK                 0x000003C0
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET               6
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK                     0x00001C00
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET                   10
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW                       0x0
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB                      0x1
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB                    0x2
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB                    0x3
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK                     0x001FE000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET                   13
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK                     0x1FE00000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET                   21
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK                      0x60000000
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET                    29
+       /* Set the duration, in seconds, fan failure signal should be
+        * sampled
+        */
+#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK        0x80000000
+#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET      31
+       u32 mgmt_traffic;       /* 0x28 */
+#define NVM_CFG1_GLOB_RESERVED60_MASK                           0x00000001
+#define NVM_CFG1_GLOB_RESERVED60_OFFSET                         0
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK                     0x000001FE
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET                   1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK                     0x0001FE00
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET                   9
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK                        0x01FE0000
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET                      17
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK                        0x06000000
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET                      25
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED                    0x0
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII                        0x1
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII                       0x2
+#define NVM_CFG1_GLOB_AUX_MODE_MASK                             0x78000000
+#define NVM_CFG1_GLOB_AUX_MODE_OFFSET                           27
+#define NVM_CFG1_GLOB_AUX_MODE_DEFAULT                          0x0
+#define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY                       0x1
+       /*  Indicates whether external thermal sensor is available */
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK              0x80000000
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET            31
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED          0x0
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED           0x1
+       u32 core_cfg;           /* 0x2C */
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK                    0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET                  0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G                0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G                0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G               0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F              0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E              0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G                0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G                0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G                0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G                0xD
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK             0x00000100
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET           8
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED         0x0
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED          0x1
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK            0x00000200
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET          9
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED        0x0
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED         0x1
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK                      0x0003FC00
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET                    10
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK                     0x03FC0000
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET                   18
+#define NVM_CFG1_GLOB_AVS_MODE_MASK                             0x1C000000
+#define NVM_CFG1_GLOB_AVS_MODE_OFFSET                           26
+#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP                       0x0
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG                    0x1
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP                    0x2
+#define NVM_CFG1_GLOB_AVS_MODE_DISABLED                         0x3
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK                 0x60000000
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET               29
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED             0x0
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED              0x1
+       u32 e_lane_cfg1;        /* 0x30 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK                        0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET                      0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK                        0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET                      4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK                        0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET                      8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK                        0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET                      12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK                        0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET                      16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK                        0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET                      20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK                        0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET                      24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK                        0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET                      28
+       u32 e_lane_cfg2;        /* 0x34 */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK                    0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET                  0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK                    0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET                  1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK                    0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET                  2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK                    0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET                  3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK                    0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET                  4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK                    0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET                  5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK                    0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET                  6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK                    0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET                  7
+#define NVM_CFG1_GLOB_SMBUS_MODE_MASK                           0x00000F00
+#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET                         8
+#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED                       0x0
+#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ                         0x1
+#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ                         0x2
+#define NVM_CFG1_GLOB_NCSI_MASK                                 0x0000F000
+#define NVM_CFG1_GLOB_NCSI_OFFSET                               12
+#define NVM_CFG1_GLOB_NCSI_DISABLED                             0x0
+#define NVM_CFG1_GLOB_NCSI_ENABLED                              0x1
+       /*  Maximum advertised pcie link width */
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK                       0x000F0000
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET                     16
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_16_LANES                   0x0
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE                     0x1
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES                    0x2
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES                    0x3
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES                    0x4
+       /*  ASPM L1 mode */
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK                         0x00300000
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET                       20
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED                       0x0
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY          0x1
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK                  0x01C00000
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET                22
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED              0x0
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C           0x1
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY              0x2
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS         0x3
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK          0x06000000
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET        25
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE       0x0
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL      0x1
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL      0x2
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH          0x3
+       /*  Set the PLDM sensor modes */
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK                     0x38000000
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET                   27
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL                 0x0
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL                 0x1
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH                     0x2
+       u32 f_lane_cfg1;        /* 0x38 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK                        0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET                      0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK                        0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET                      4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK                        0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET                      8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK                        0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET                      12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK                        0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET                      16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK                        0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET                      20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK                        0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET                      24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK                        0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET                      28
+       u32 f_lane_cfg2;        /* 0x3C */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK                    0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET                  0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK                    0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET                  1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK                    0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET                  2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK                    0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET                  3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK                    0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET                  4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK                    0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET                  5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK                    0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET                  6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK                    0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET                  7
+       /*  Control the period between two successive checks */
+#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK    0x0000FF00
+#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET  8
+       /*  Set shutdown temperature */
+#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK       0x00FF0000
+#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET     16
+       /*  Set max. count for over operational temperature */
+#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK             0xFF000000
+#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET           24
+       u32 eagle_preemphasis;  /* 0x40 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET                       0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET                       8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET                       16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET                       24
+       u32 eagle_driver_current;       /* 0x44 */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK                            0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET                          0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK                            0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET                          8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK                            0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET                          16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK                            0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET                          24
+       u32 falcon_preemphasis; /* 0x48 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET                       0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET                       8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET                       16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET                       24
+       u32 falcon_driver_current;      /* 0x4C */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK                            0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET                          0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK                            0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET                          8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK                            0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET                          16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK                            0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET                          24
+       u32 pci_id;             /* 0x50 */
+#define NVM_CFG1_GLOB_VENDOR_ID_MASK                            0x0000FFFF
+#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET                          0
+       /*  Set caution temperature */
+#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK        0x00FF0000
+#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET      16
+       /*  Set external thermal sensor I2C address */
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK      0xFF000000
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET    24
+       u32 pci_subsys_id;      /* 0x54 */
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK                  0x0000FFFF
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET                0
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK                  0xFFFF0000
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET                16
+       u32 bar;                /* 0x58 */
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK                   0x0000000F
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET                 0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED               0x0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K                     0x1
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K                     0x2
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K                     0x3
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K                    0x4
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K                    0x5
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K                    0x6
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K                   0x7
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K                   0x8
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K                   0x9
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M                     0xA
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M                     0xB
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M                     0xC
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M                     0xD
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M                    0xE
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M                    0xF
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK                     0x000000F0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET                   4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED                 0x0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K                       0x1
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K                       0x2
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K                      0x3
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K                      0x4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K                      0x5
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K                     0x6
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K                     0x7
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K                     0x8
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M                       0x9
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M                       0xA
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M                       0xB
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M                       0xC
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M                      0xD
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M                      0xE
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M                      0xF
+#define NVM_CFG1_GLOB_BAR2_SIZE_MASK                            0x00000F00
+#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET                          8
+#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED                        0x0
+#define NVM_CFG1_GLOB_BAR2_SIZE_64K                             0x1
+#define NVM_CFG1_GLOB_BAR2_SIZE_128K                            0x2
+#define NVM_CFG1_GLOB_BAR2_SIZE_256K                            0x3
+#define NVM_CFG1_GLOB_BAR2_SIZE_512K                            0x4
+#define NVM_CFG1_GLOB_BAR2_SIZE_1M                              0x5
+#define NVM_CFG1_GLOB_BAR2_SIZE_2M                              0x6
+#define NVM_CFG1_GLOB_BAR2_SIZE_4M                              0x7
+#define NVM_CFG1_GLOB_BAR2_SIZE_8M                              0x8
+#define NVM_CFG1_GLOB_BAR2_SIZE_16M                             0x9
+#define NVM_CFG1_GLOB_BAR2_SIZE_32M                             0xA
+#define NVM_CFG1_GLOB_BAR2_SIZE_64M                             0xB
+#define NVM_CFG1_GLOB_BAR2_SIZE_128M                            0xC
+#define NVM_CFG1_GLOB_BAR2_SIZE_256M                            0xD
+#define NVM_CFG1_GLOB_BAR2_SIZE_512M                            0xE
+#define NVM_CFG1_GLOB_BAR2_SIZE_1G                              0xF
+       /* Set the duration, in seconds, fan failure signal should be
+        * sampled
+        */
+#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK                 0x0000F000
+#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET               12
+       u32 eagle_txfir_main;   /* 0x5C */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET                   24
+       u32 eagle_txfir_post;   /* 0x60 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET                   24
+       u32 falcon_txfir_main;  /* 0x64 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET                   24
+       u32 falcon_txfir_post;  /* 0x68 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET                   24
+       u32 manufacture_ver;    /* 0x6C */
+#define NVM_CFG1_GLOB_MANUF0_VER_MASK                           0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET                         0
+#define NVM_CFG1_GLOB_MANUF1_VER_MASK                           0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET                         6
+#define NVM_CFG1_GLOB_MANUF2_VER_MASK                           0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET                         12
+#define NVM_CFG1_GLOB_MANUF3_VER_MASK                           0x00FC0000
+#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET                         18
+#define NVM_CFG1_GLOB_MANUF4_VER_MASK                           0x3F000000
+#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET                         24
+       u32 manufacture_time;   /* 0x70 */
+#define NVM_CFG1_GLOB_MANUF0_TIME_MASK                          0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET                        0
+#define NVM_CFG1_GLOB_MANUF1_TIME_MASK                          0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET                        6
+#define NVM_CFG1_GLOB_MANUF2_TIME_MASK                          0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET                        12
+       u32 led_global_settings;        /* 0x74 */
+#define NVM_CFG1_GLOB_LED_SWAP_0_MASK                           0x0000000F
+#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET                         0
+#define NVM_CFG1_GLOB_LED_SWAP_1_MASK                           0x000000F0
+#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET                         4
+#define NVM_CFG1_GLOB_LED_SWAP_2_MASK                           0x00000F00
+#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET                         8
+#define NVM_CFG1_GLOB_LED_SWAP_3_MASK                           0x0000F000
+#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET                         12
+       u32 generic_cont1;      /* 0x78 */
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK                         0x000003FF
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET                       0
+       u32 mbi_version;        /* 0x7C */
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK                        0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET                      0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK                        0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET                      8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK                        0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET                      16
+       u32 mbi_date;           /* 0x80 */
+       u32 misc_sig;           /* 0x84 */
+       /*  Define the GPIO mapping to switch i2c mux */
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK                   0x000000FF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET                 0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK                   0x0000FF00
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET                 8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA                      0x0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0                   0x1
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1                   0x2
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2                   0x3
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3                   0x4
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4                   0x5
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5                   0x6
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6                   0x7
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7                   0x8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8                   0x9
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9                   0xA
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10                  0xB
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11                  0xC
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12                  0xD
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13                  0xE
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14                  0xF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15                  0x10
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16                  0x11
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17                  0x12
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18                  0x13
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19                  0x14
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20                  0x15
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21                  0x16
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22                  0x17
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23                  0x18
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24                  0x19
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25                  0x1A
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26                  0x1B
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27                  0x1C
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28                  0x1D
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29                  0x1E
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30                  0x1F
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31                  0x20
+       u32 device_capabilities;        /* 0x88 */
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET              0x1
+       u32 power_dissipated;   /* 0x8C */
+#define NVM_CFG1_GLOB_POWER_DIS_D0_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET                       0
+#define NVM_CFG1_GLOB_POWER_DIS_D1_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET                       8
+#define NVM_CFG1_GLOB_POWER_DIS_D2_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET                       16
+#define NVM_CFG1_GLOB_POWER_DIS_D3_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET                       24
+       u32 power_consumed;     /* 0x90 */
+#define NVM_CFG1_GLOB_POWER_CONS_D0_MASK                        0x000000FF
+#define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET                      0
+#define NVM_CFG1_GLOB_POWER_CONS_D1_MASK                        0x0000FF00
+#define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET                      8
+#define NVM_CFG1_GLOB_POWER_CONS_D2_MASK                        0x00FF0000
+#define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET                      16
+#define NVM_CFG1_GLOB_POWER_CONS_D3_MASK                        0xFF000000
+#define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET                      24
+       u32 efi_version;        /* 0x94 */
+       u32 reserved[42];       /* 0x98 */
+};
+
+/* Per-engine (path) section of the NVM configuration image.
+ * No path-scoped options are defined yet; the 30 dwords below hold the
+ * section's fixed size so global/port sections keep their NVM offsets.
+ */
+struct nvm_cfg1_path {
+       u32 reserved[30];       /* 0x0 */
+};
+
+struct nvm_cfg1_port {
+       u32 reserved__m_relocated_to_option_123;        /* 0x0 */
+       u32 reserved__m_relocated_to_option_124;        /* 0x4 */
+       u32 generic_cont0;      /* 0x8 */
+#define NVM_CFG1_PORT_LED_MODE_MASK                             0x000000FF
+#define NVM_CFG1_PORT_LED_MODE_OFFSET                           0
+#define NVM_CFG1_PORT_LED_MODE_MAC1                             0x0
+#define NVM_CFG1_PORT_LED_MODE_PHY1                             0x1
+#define NVM_CFG1_PORT_LED_MODE_PHY2                             0x2
+#define NVM_CFG1_PORT_LED_MODE_PHY3                             0x3
+#define NVM_CFG1_PORT_LED_MODE_MAC2                             0x4
+#define NVM_CFG1_PORT_LED_MODE_PHY4                             0x5
+#define NVM_CFG1_PORT_LED_MODE_PHY5                             0x6
+#define NVM_CFG1_PORT_LED_MODE_PHY6                             0x7
+#define NVM_CFG1_PORT_LED_MODE_MAC3                             0x8
+#define NVM_CFG1_PORT_LED_MODE_PHY7                             0x9
+#define NVM_CFG1_PORT_LED_MODE_PHY8                             0xA
+#define NVM_CFG1_PORT_LED_MODE_PHY9                             0xB
+#define NVM_CFG1_PORT_LED_MODE_MAC4                             0xC
+#define NVM_CFG1_PORT_LED_MODE_PHY10                            0xD
+#define NVM_CFG1_PORT_LED_MODE_PHY11                            0xE
+#define NVM_CFG1_PORT_LED_MODE_PHY12                            0xF
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK            0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET          20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET        0x1
+       u32 pcie_cfg;           /* 0xC */
+#define NVM_CFG1_PORT_RESERVED15_MASK                           0x00000007
+#define NVM_CFG1_PORT_RESERVED15_OFFSET                         0
+       u32 features;           /* 0x10 */
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK           0x00000001
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET         0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED       0x0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED        0x1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK                     0x00000002
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET                   1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED                 0x0
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED                  0x1
+       u32 speed_cap_mask;     /* 0x14 */
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK            0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET          0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G              0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G             0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G             0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G             0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G             0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G            0x40
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK            0xFFFF0000
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET          16
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G              0x1
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G             0x2
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G             0x8
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G             0x10
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G             0x20
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G            0x40
+       u32 link_settings;      /* 0x18 */
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK                       0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET                     0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ                  0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK                     0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET                   4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG                  0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX                       0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX                       0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK                       0x00000780
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET                     7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ                  0x8
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK                     0x00003800
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET                   11
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG                  0x1
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX                       0x2
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX                       0x4
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK      0x00004000
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET    14
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED  0x0
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED   0x1
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK                       0x00018000
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET                     15
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM                 0x0
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM                        0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK                       0x000E0000
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET                     17
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_NONE             0x0
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_FIRECODE         0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_RS               0x2
+       u32 phy_cfg;            /* 0x1C */
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK                  0x0000FFFF
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET                0
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG                 0x1
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER             0x2
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER                 0x4
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN       0x8
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN        0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK                 0x00FF0000
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET               16
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS               0x0
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR                   0x2
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2                  0x3
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4                  0x4
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI                  0x8
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI                  0x9
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X                0xB
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII                0xC
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI                0x11
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI                0x12
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI                 0x21
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI                 0x22
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI               0x31
+#define NVM_CFG1_PORT_AN_MODE_MASK                              0xFF000000
+#define NVM_CFG1_PORT_AN_MODE_OFFSET                            24
+#define NVM_CFG1_PORT_AN_MODE_NONE                              0x0
+#define NVM_CFG1_PORT_AN_MODE_CL73                              0x1
+#define NVM_CFG1_PORT_AN_MODE_CL37                              0x2
+#define NVM_CFG1_PORT_AN_MODE_CL73_BAM                          0x3
+#define NVM_CFG1_PORT_AN_MODE_CL37_BAM                          0x4
+#define NVM_CFG1_PORT_AN_MODE_HPAM                              0x5
+#define NVM_CFG1_PORT_AN_MODE_SGMII                             0x6
+       u32 mgmt_traffic;       /* 0x20 */
+#define NVM_CFG1_PORT_RESERVED61_MASK                           0x0000000F
+#define NVM_CFG1_PORT_RESERVED61_OFFSET                         0
+       u32 ext_phy;            /* 0x24 */
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK                    0x000000FF
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET                  0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE                    0x0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844                0x1
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK                 0x0000FF00
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET               8
+       u32 mba_cfg1;           /* 0x28 */
+#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK                        0x00000001
+#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET                      0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED                    0x0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED                     0x1
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK            0x00000006
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET          1
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK                       0x00000078
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET                     3
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK                    0x00000080
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET                  7
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S                  0x0
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B                  0x1
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK                0x00000100
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET              8
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED            0x0
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED             0x1
+#define NVM_CFG1_PORT_RESERVED5_MASK                            0x0001FE00
+#define NVM_CFG1_PORT_RESERVED5_OFFSET                          9
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK                   0x001E0000
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET                 17
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG                0x0
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G                     0x1
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G                    0x2
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G                    0x4
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G                    0x5
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G                    0x6
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G                   0x7
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ              0x8
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK     0x00E00000
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET   21
+       u32 mba_cfg2;           /* 0x2C */
+#define NVM_CFG1_PORT_RESERVED65_MASK                           0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED65_OFFSET                         0
+#define NVM_CFG1_PORT_RESERVED66_MASK                           0x00010000
+#define NVM_CFG1_PORT_RESERVED66_OFFSET                         16
+       u32 vf_cfg;             /* 0x30 */
+#define NVM_CFG1_PORT_RESERVED8_MASK                            0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED8_OFFSET                          0
+#define NVM_CFG1_PORT_RESERVED6_MASK                            0x000F0000
+#define NVM_CFG1_PORT_RESERVED6_OFFSET                          16
+       struct nvm_cfg_mac_address lldp_mac_address;    /* 0x34 */
+       u32 led_port_settings;  /* 0x3C */
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK                   0x000000FF
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET                 0
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK                   0x0000FF00
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET                 8
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK                   0x00FF0000
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET                 16
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G                      0x1
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G                     0x2
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G                     0x8
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G                     0x10
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G                     0x20
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G                    0x40
+       u32 transceiver_00;     /* 0x40 */
+       /*  Define for mapping of transceiver signal module absent */
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK                     0x000000FF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET                   0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA                       0x0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0                    0x1
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1                    0x2
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2                    0x3
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3                    0x4
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4                    0x5
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5                    0x6
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6                    0x7
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7                    0x8
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8                    0x9
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9                    0xA
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10                   0xB
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11                   0xC
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12                   0xD
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13                   0xE
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14                   0xF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15                   0x10
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16                   0x11
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17                   0x12
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18                   0x13
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19                   0x14
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20                   0x15
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21                   0x16
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22                   0x17
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23                   0x18
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24                   0x19
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25                   0x1A
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26                   0x1B
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27                   0x1C
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28                   0x1D
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29                   0x1E
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30                   0x1F
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31                   0x20
+       /*  Define the GPIO mux settings to switch i2c mux to this port */
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK                  0x00000F00
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET                8
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK                  0x0000F000
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET                12
+       u32 device_ids;         /* 0x44 */
+#define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK                       0x000000FF
+#define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET                     0
+#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK                  0xFF000000
+#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET                24
+       u32 board_cfg;          /* 0x48 */
+       /* This field defines the board technology
+        * (backplane, transceiver, external PHY)
+        */
+#define NVM_CFG1_PORT_PORT_TYPE_MASK                            0x000000FF
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET                          0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED                       0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE                          0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE                       0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY                         0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE                    0x4
+       /*  This field defines the GPIO mapped to tx_disable signal in SFP */
+#define NVM_CFG1_PORT_TX_DISABLE_MASK                           0x0000FF00
+#define NVM_CFG1_PORT_TX_DISABLE_OFFSET                         8
+#define NVM_CFG1_PORT_TX_DISABLE_NA                             0x0
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO0                          0x1
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO1                          0x2
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO2                          0x3
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO3                          0x4
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO4                          0x5
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO5                          0x6
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO6                          0x7
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO7                          0x8
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO8                          0x9
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO9                          0xA
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO10                         0xB
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO11                         0xC
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO12                         0xD
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO13                         0xE
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO14                         0xF
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO15                         0x10
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO16                         0x11
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO17                         0x12
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO18                         0x13
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO19                         0x14
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO20                         0x15
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO21                         0x16
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO22                         0x17
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO23                         0x18
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO24                         0x19
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO25                         0x1A
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO26                         0x1B
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO27                         0x1C
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO28                         0x1D
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO29                         0x1E
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO30                         0x1F
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO31                         0x20
+       u32 reserved[131];      /* 0x4C */
+};
+
+/* Per-function (PF) NVM configuration: permanent MAC address, PCI
+ * device identity, preboot/boot-protocol selection, SR-IOV VF count,
+ * BAR1 sizing and preboot VLAN for a single function.  The /* 0xNN */
+ * comments give each field's offset within the NVM image layout.
+ */
+struct nvm_cfg1_func {
+       struct nvm_cfg_mac_address mac_address; /* 0x0 */
+       u32 rsrv1;              /* 0x8 */
+#define NVM_CFG1_FUNC_RESERVED1_MASK                            0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED1_OFFSET                          0
+#define NVM_CFG1_FUNC_RESERVED2_MASK                            0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED2_OFFSET                          16
+       u32 rsrv2;              /* 0xC */
+#define NVM_CFG1_FUNC_RESERVED3_MASK                            0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED3_OFFSET                          0
+#define NVM_CFG1_FUNC_RESERVED4_MASK                            0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED4_OFFSET                          16
+       u32 device_id;          /* 0x10 */
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK                  0x0000FFFF
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET                0
+#define NVM_CFG1_FUNC_RESERVED77_MASK                           0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED77_OFFSET                         16
+       u32 cmn_cfg;            /* 0x14 */
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK                0x00000007
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET              0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE                 0x0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE                0x7
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK                     0x0007FFF8
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET                   3
+#define NVM_CFG1_FUNC_PERSONALITY_MASK                          0x00780000
+#define NVM_CFG1_FUNC_PERSONALITY_OFFSET                        19
+#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET                      0x0
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK                     0x7F800000
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET                   23
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK                   0x80000000
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET                 31
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED               0x0
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED                0x1
+       u32 pci_cfg;            /* 0x18 */
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK                 0x0000007F
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET               0
+       /* NOTE(review): "RESERVESD12" looks like a typo for "RESERVED12";
+        * left untouched because the name is part of the vendor HSI and
+        * may be referenced elsewhere.
+        */
+#define NVM_CFG1_FUNC_RESERVESD12_MASK                          0x00003F80
+#define NVM_CFG1_FUNC_RESERVESD12_OFFSET                        7
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK                            0x0003C000
+#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET                          14
+#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED                        0x0
+#define NVM_CFG1_FUNC_BAR1_SIZE_64K                             0x1
+#define NVM_CFG1_FUNC_BAR1_SIZE_128K                            0x2
+#define NVM_CFG1_FUNC_BAR1_SIZE_256K                            0x3
+#define NVM_CFG1_FUNC_BAR1_SIZE_512K                            0x4
+#define NVM_CFG1_FUNC_BAR1_SIZE_1M                              0x5
+#define NVM_CFG1_FUNC_BAR1_SIZE_2M                              0x6
+#define NVM_CFG1_FUNC_BAR1_SIZE_4M                              0x7
+#define NVM_CFG1_FUNC_BAR1_SIZE_8M                              0x8
+#define NVM_CFG1_FUNC_BAR1_SIZE_16M                             0x9
+#define NVM_CFG1_FUNC_BAR1_SIZE_32M                             0xA
+#define NVM_CFG1_FUNC_BAR1_SIZE_64M                             0xB
+#define NVM_CFG1_FUNC_BAR1_SIZE_128M                            0xC
+#define NVM_CFG1_FUNC_BAR1_SIZE_256M                            0xD
+#define NVM_CFG1_FUNC_BAR1_SIZE_512M                            0xE
+#define NVM_CFG1_FUNC_BAR1_SIZE_1G                              0xF
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK                        0x03FC0000
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET                      18
+       /* NOTE(review): the offset comment jumps from 0x18 (pci_cfg) to
+        * 0x2C — presumably fields of the full HSI layout were dropped
+        * here, so the declared struct layout no longer matches the
+        * annotated offsets; verify against the NVM image map.
+        */
+       u32 preboot_generic_cfg;        /* 0x2C */
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK                   0x0000FFFF
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET                 0
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK                         0x00010000
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET                       16
+       u32 reserved[8];        /* 0x30 */
+};
+
+/* Layout of NVM configuration section 1: one global block followed by
+ * per-path, per-port and per-function arrays (sizes come from the
+ * MCP_GLOB_*_MAX constants declared elsewhere in the HSI headers).
+ * The /* 0xNNN */ comments are offsets within the section.
+ */
+struct nvm_cfg1 {
+       struct nvm_cfg1_glob glob;      /* 0x0 */
+       struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];   /* 0x140 */
+       struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];   /* 0x230 */
+       struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];   /* 0xB90 */
+};
+
+/******************************************
+ * nvm_cfg structs
+ ******************************************/
+/* Identifiers for the sections of the NVM configuration image; used to
+ * index nvm_cfg.sections_offset.
+ */
+enum nvm_cfg_sections {
+       NVM_CFG_SECTION_NVM_CFG1,
+       NVM_CFG_SECTION_MAX
+};
+
+/* Root NVM configuration container: number of valid sections, the
+ * per-section offsets (indexed by enum nvm_cfg_sections) and the
+ * section-1 payload itself.
+ */
+struct nvm_cfg {
+       u32 num_sections;
+       u32 sections_offset[NVM_CFG_SECTION_MAX];
+       struct nvm_cfg1 cfg1;
+};
+
+#endif /* NVM_CFG_H */
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
new file mode 100644 (file)
index 0000000..3b25e1a
--- /dev/null
@@ -0,0 +1,1107 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/* Bit-field layout of the CDU_REG_CID_ADDR_PARAMS register (address
+ * defined further below): per the mask/shift pairs, CONTEXT_SIZE
+ * occupies bits [11:0], BLOCK_WASTE bits [23:12] and NCIB bits [31:24].
+ */
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+       0
+
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE          ( \
+               0xfff << 0)
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+       12
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE           ( \
+               0xfff << 12)
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+       24
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB                  ( \
+               0xff << 24)
+
+#define  XSDM_REG_OPERATION_GEN \
+       0xf80408UL
+#define  NIG_REG_RX_BRB_OUT_EN \
+       0x500e18UL
+#define  NIG_REG_STORM_OUT_EN \
+       0x500e08UL
+#define  PSWRQ2_REG_L2P_VALIDATE_VFID \
+       0x240c50UL
+#define  PGLUE_B_REG_USE_CLIENTID_IN_TAG       \
+       0x2aae04UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER       \
+       0x2aa16cUL
+#define  BAR0_MAP_REG_MSDM_RAM \
+       0x1d00000UL
+#define  BAR0_MAP_REG_USDM_RAM \
+       0x1d80000UL
+#define  BAR0_MAP_REG_PSDM_RAM \
+       0x1f00000UL
+#define  BAR0_MAP_REG_TSDM_RAM \
+       0x1c80000UL
+#define  NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+       0x5011f4UL
+#define  PRS_REG_SEARCH_TCP \
+       0x1f0400UL
+#define  PRS_REG_SEARCH_UDP \
+       0x1f0404UL
+#define  PRS_REG_SEARCH_OPENFLOW       \
+       0x1f0434UL
+#define  TM_REG_PF_ENABLE_CONN \
+       0x2c043cUL
+#define  TM_REG_PF_ENABLE_TASK \
+       0x2c0444UL
+#define  TM_REG_PF_SCAN_ACTIVE_CONN \
+       0x2c04fcUL
+#define  TM_REG_PF_SCAN_ACTIVE_TASK \
+       0x2c0500UL
+#define  IGU_REG_LEADING_EDGE_LATCH \
+       0x18082cUL
+#define  IGU_REG_TRAILING_EDGE_LATCH \
+       0x180830UL
+#define  QM_REG_USG_CNT_PF_TX \
+       0x2f2eacUL
+#define  QM_REG_USG_CNT_PF_OTHER       \
+       0x2f2eb0UL
+#define  DORQ_REG_PF_DB_ENABLE \
+       0x100508UL
+#define  QM_REG_PF_EN \
+       0x2f2ea4UL
+#define  TCFC_REG_STRONG_ENABLE_PF \
+       0x2d0708UL
+#define  CCFC_REG_STRONG_ENABLE_PF \
+       0x2e0708UL
+#define  PGLUE_B_REG_PGL_ADDR_88_F0 \
+       0x2aa404UL
+#define  PGLUE_B_REG_PGL_ADDR_8C_F0 \
+       0x2aa408UL
+#define  PGLUE_B_REG_PGL_ADDR_90_F0 \
+       0x2aa40cUL
+#define  PGLUE_B_REG_PGL_ADDR_94_F0 \
+       0x2aa410UL
+#define  PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+       0x2aa138UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+       0x2aa174UL
+#define  MISC_REG_GEN_PURP_CR0 \
+       0x008c80UL
+#define  MCP_REG_SCRATCH       \
+       0xe20000UL
+#define  CNIG_REG_NW_PORT_MODE_BB_B0 \
+       0x218200UL
+#define  MISCS_REG_CHIP_NUM \
+       0x00976cUL
+#define  MISCS_REG_CHIP_REV \
+       0x009770UL
+#define  MISCS_REG_CMT_ENABLED_FOR_PAIR \
+       0x00971cUL
+#define  MISCS_REG_CHIP_TEST_REG       \
+       0x009778UL
+#define  MISCS_REG_CHIP_METAL \
+       0x009774UL
+#define  BRB_REG_HEADER_SIZE \
+       0x340804UL
+#define  BTB_REG_HEADER_SIZE \
+       0xdb0804UL
+#define  CAU_REG_LONG_TIMEOUT_THRESHOLD \
+       0x1c0708UL
+#define  CCFC_REG_ACTIVITY_COUNTER \
+       0x2e8800UL
+#define  CDU_REG_CID_ADDR_PARAMS       \
+       0x580900UL
+#define  DBG_REG_CLIENT_ENABLE \
+       0x010004UL
+#define  DMAE_REG_INIT \
+       0x00c000UL
+#define  DORQ_REG_IFEN \
+       0x100040UL
+#define  GRC_REG_TIMEOUT_EN \
+       0x050404UL
+#define  IGU_REG_BLOCK_CONFIGURATION \
+       0x180040UL
+#define  MCM_REG_INIT \
+       0x1200000UL
+#define  MCP2_REG_DBG_DWORD_ENABLE \
+       0x052404UL
+#define  MISC_REG_PORT_MODE \
+       0x008c00UL
+#define MISC_REG_BLOCK_256B_EN \
+       0x008c14UL
+#define MISCS_REG_RESET_PL_HV \
+       0x009060UL
+#define  MISCS_REG_CLK_100G_MODE       \
+       0x009070UL
+#define MISCS_REG_RESET_PL_HV_2 \
+       0x009150UL
+#define  MSDM_REG_ENABLE_IN1 \
+       0xfc0004UL
+#define  MSEM_REG_ENABLE_IN \
+       0x1800004UL
+#define  NIG_REG_CM_HDR \
+       0x500840UL
+#define  NCSI_REG_CONFIG       \
+       0x040200UL
+#define PSWRQ2_REG_RBC_DONE \
+       0x240000UL
+#define PSWRQ2_REG_CFG_DONE \
+       0x240004UL
+#define  PBF_REG_INIT \
+       0xd80000UL
+#define  PTU_REG_ATC_INIT_ARRAY \
+       0x560000UL
+#define  PCM_REG_INIT \
+       0x1100000UL
+#define  PGLUE_B_REG_ADMIN_PER_PF_REGION       \
+       0x2a9000UL
+#define  PRM_REG_DISABLE_PRM \
+       0x230000UL
+#define  PRS_REG_SOFT_RST \
+       0x1f0000UL
+#define  PSDM_REG_ENABLE_IN1 \
+       0xfa0004UL
+#define  PSEM_REG_ENABLE_IN \
+       0x1600004UL
+#define  PSWRQ_REG_DBG_SELECT \
+       0x280020UL
+#define  PSWRQ2_REG_CDUT_P_SIZE \
+       0x24000cUL
+#define  PSWHST_REG_DISCARD_INTERNAL_WRITES \
+       0x2a0040UL
+#define  PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+       0x29e050UL
+#define  PSWRD_REG_DBG_SELECT \
+       0x29c040UL
+#define  PSWRD2_REG_CONF11 \
+       0x29d064UL
+#define  PSWWR_REG_USDM_FULL_TH \
+       0x29a040UL
+#define  PSWWR2_REG_CDU_FULL_TH2       \
+       0x29b040UL
+#define  QM_REG_MAXPQSIZE_0 \
+       0x2f0434UL
+#define  RSS_REG_RSS_INIT_EN \
+       0x238804UL
+#define  RDIF_REG_STOP_ON_ERROR \
+       0x300040UL
+#define  SRC_REG_SOFT_RST \
+       0x23874cUL
+#define  TCFC_REG_ACTIVITY_COUNTER \
+       0x2d8800UL
+#define  TCM_REG_INIT \
+       0x1180000UL
+#define  TM_REG_PXP_READ_DATA_FIFO_INIT \
+       0x2c0014UL
+#define  TSDM_REG_ENABLE_IN1 \
+       0xfb0004UL
+#define  TSEM_REG_ENABLE_IN \
+       0x1700004UL
+#define  TDIF_REG_STOP_ON_ERROR \
+       0x310040UL
+#define  UCM_REG_INIT \
+       0x1280000UL
+#define  UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+       0x051004UL
+#define  USDM_REG_ENABLE_IN1 \
+       0xfd0004UL
+#define  USEM_REG_ENABLE_IN \
+       0x1900004UL
+#define  XCM_REG_INIT \
+       0x1000000UL
+#define  XSDM_REG_ENABLE_IN1 \
+       0xf80004UL
+#define  XSEM_REG_ENABLE_IN \
+       0x1400004UL
+#define  YCM_REG_INIT \
+       0x1080000UL
+#define  YSDM_REG_ENABLE_IN1 \
+       0xf90004UL
+#define  YSEM_REG_ENABLE_IN \
+       0x1500004UL
+#define  XYLD_REG_SCBD_STRICT_PRIO \
+       0x4c0000UL
+#define  TMLD_REG_SCBD_STRICT_PRIO \
+       0x4d0000UL
+#define  MULD_REG_SCBD_STRICT_PRIO \
+       0x4e0000UL
+#define  YULD_REG_SCBD_STRICT_PRIO \
+       0x4c8000UL
+#define  MISC_REG_SHARED_MEM_ADDR \
+       0x008c20UL
+#define  DMAE_REG_GO_C0 \
+       0x00c048UL
+#define  DMAE_REG_GO_C1 \
+       0x00c04cUL
+#define  DMAE_REG_GO_C2 \
+       0x00c050UL
+#define  DMAE_REG_GO_C3 \
+       0x00c054UL
+#define  DMAE_REG_GO_C4 \
+       0x00c058UL
+#define  DMAE_REG_GO_C5 \
+       0x00c05cUL
+#define  DMAE_REG_GO_C6 \
+       0x00c060UL
+#define  DMAE_REG_GO_C7 \
+       0x00c064UL
+#define  DMAE_REG_GO_C8 \
+       0x00c068UL
+#define  DMAE_REG_GO_C9 \
+       0x00c06cUL
+#define  DMAE_REG_GO_C10       \
+       0x00c070UL
+#define  DMAE_REG_GO_C11       \
+       0x00c074UL
+#define  DMAE_REG_GO_C12       \
+       0x00c078UL
+#define  DMAE_REG_GO_C13       \
+       0x00c07cUL
+#define  DMAE_REG_GO_C14       \
+       0x00c080UL
+#define  DMAE_REG_GO_C15       \
+       0x00c084UL
+#define  DMAE_REG_GO_C16       \
+       0x00c088UL
+#define  DMAE_REG_GO_C17       \
+       0x00c08cUL
+#define  DMAE_REG_GO_C18       \
+       0x00c090UL
+#define  DMAE_REG_GO_C19       \
+       0x00c094UL
+#define  DMAE_REG_GO_C20       \
+       0x00c098UL
+#define  DMAE_REG_GO_C21       \
+       0x00c09cUL
+#define  DMAE_REG_GO_C22       \
+       0x00c0a0UL
+#define  DMAE_REG_GO_C23       \
+       0x00c0a4UL
+#define  DMAE_REG_GO_C24       \
+       0x00c0a8UL
+#define  DMAE_REG_GO_C25       \
+       0x00c0acUL
+#define  DMAE_REG_GO_C26       \
+       0x00c0b0UL
+#define  DMAE_REG_GO_C27       \
+       0x00c0b4UL
+#define  DMAE_REG_GO_C28       \
+       0x00c0b8UL
+#define  DMAE_REG_GO_C29       \
+       0x00c0bcUL
+#define  DMAE_REG_GO_C30       \
+       0x00c0c0UL
+#define  DMAE_REG_GO_C31       \
+       0x00c0c4UL
+#define  DMAE_REG_CMD_MEM \
+       0x00c800UL
+#define  QM_REG_MAXPQSIZETXSEL_0       \
+       0x2f0440UL
+#define  QM_REG_SDMCMDREADY \
+       0x2f1e10UL
+#define  QM_REG_SDMCMDADDR \
+       0x2f1e04UL
+#define  QM_REG_SDMCMDDATALSB \
+       0x2f1e08UL
+#define  QM_REG_SDMCMDDATAMSB \
+       0x2f1e0cUL
+#define  QM_REG_SDMCMDGO       \
+       0x2f1e14UL
+#define  QM_REG_RLPFCRD \
+       0x2f4d80UL
+#define  QM_REG_RLPFINCVAL \
+       0x2f4c80UL
+#define  QM_REG_RLGLBLCRD \
+       0x2f4400UL
+#define  QM_REG_RLGLBLINCVAL \
+       0x2f3400UL
+#define  IGU_REG_ATTENTION_ENABLE \
+       0x18083cUL
+#define  IGU_REG_ATTN_MSG_ADDR_L       \
+       0x180820UL
+#define  IGU_REG_ATTN_MSG_ADDR_H       \
+       0x180824UL
+#define  MISC_REG_AEU_GENERAL_ATTN_0 \
+       0x008400UL
+#define  CAU_REG_SB_ADDR_MEMORY \
+       0x1c8000UL
+#define  CAU_REG_SB_VAR_MEMORY \
+       0x1c6000UL
+#define  CAU_REG_PI_MEMORY \
+       0x1d0000UL
+#define  IGU_REG_PF_CONFIGURATION \
+       0x180800UL
+#define  MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+       0x00849cUL
+#define  MISC_REG_AEU_MASK_ATTN_IGU \
+       0x008494UL
+#define  IGU_REG_CLEANUP_STATUS_0 \
+       0x180980UL
+#define  IGU_REG_CLEANUP_STATUS_1 \
+       0x180a00UL
+#define  IGU_REG_CLEANUP_STATUS_2 \
+       0x180a80UL
+#define  IGU_REG_CLEANUP_STATUS_3 \
+       0x180b00UL
+#define  IGU_REG_CLEANUP_STATUS_4 \
+       0x180b80UL
+#define  IGU_REG_COMMAND_REG_32LSB_DATA \
+       0x180840UL
+#define  IGU_REG_COMMAND_REG_CTRL \
+       0x180848UL
+#define  IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN     ( \
+               0x1 << 1)
+#define  IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN      ( \
+               0x1 << 0)
+#define  IGU_REG_MAPPING_MEMORY \
+       0x184000UL
+#define  MISCS_REG_GENERIC_POR_0       \
+       0x0096d4UL
+#define  MCP_REG_NVM_CFG4 \
+       0xe0642cUL
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE   ( \
+               0x7 << 0)
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+       0
+#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL
+#define CNIG_REG_PMEG_IF_CMD_BB_B0 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB_B0 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB_B0 0x218228UL
+#define NWM_REG_MAC0 0x800400UL
+#define NWM_REG_MAC0_SIZE 256
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG 0x000008UL
+#define MISC_REG_RESET_PL_PDA_VAUX 0x008090UL
+#define MISC_REG_XMAC_CORE_PORT_MODE 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE 0x008c04UL
+#define XMAC_REG_MODE 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE 0x210040UL
+#define XMAC_REG_TX_CTRL_LO 0x210020UL
+#define XMAC_REG_CTRL 0x210000UL
+#define XMAC_REG_RX_CTRL 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1 << 12)
+#define MISC_REG_CLK_100G_MODE 0x008c10UL
+#define MISC_REG_OPTE_MODE 0x008c0cUL
+#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define PRS_REG_SEARCH_TAG1 0x1f0444UL
+#define PRS_REG_SEARCH_TCP_FIRST_FRAG 0x1f0410UL
+#define MISCS_REG_PLL_MAIN_CTRL_4 0x00974cUL
+#define MISCS_REG_ECO_RESERVED 0x0097b4UL
+#define PGLUE_B_REG_PF_BAR0_SIZE 0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE 0x2aae64UL
+/* NIG LLH function-filter registers.
+ * NOTE(review): the generated list defined EN_SIZE/EN/VALUE/
+ * PROTOCOL_TYPE repeatedly (up to six identical copies each).
+ * Identical macro redefinition is benign in C but hides real changes
+ * in diffs, so each register is now defined exactly once; values are
+ * unchanged.  Hunk line counts must be regenerated when re-rolling
+ * this patch.
+ */
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL
+#define XMAC_REG_CTRL_TX_EN (0x1 << 0)
+#define XMAC_REG_CTRL_RX_EN (0x1 << 1)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xff << 24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define PSWRQ2_REG_ILT_MEMORY 0x260000UL
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_0 0x50160cUL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_0 0x501f88UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_1 0x501610UL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_1 0x501f8cUL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 0x5015e4UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0 0x501f58UL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 0x5015e8UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 0x501f5cUL
+#define NIG_REG_LB_ARB_CLIENT_IS_STRICT 0x5015c0UL
+#define NIG_REG_TX_ARB_CLIENT_IS_STRICT 0x501f34UL
+#define NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ 0x5015c4UL
+#define NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x501f38UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL 0x501f1cUL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD 0x501f20UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE 0x501f24UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE 0x501f28UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_LB_BRBRATELIMIT_CTRL 0x50150cUL
+#define NIG_REG_LB_BRBRATELIMIT_INC_PERIOD 0x501510UL
+#define NIG_REG_LB_BRBRATELIMIT_INC_VALUE 0x501514UL
+#define NIG_REG_LB_BRBRATELIMIT_MAX_VALUE 0x501518UL
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT 1
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0 0x501520UL
+#define NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 0x501540UL
+#define NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 0x501560UL
+#define NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 0x501580UL
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT 0
+#define NIG_REG_PRIORITY_FOR_TC_0 0x501bccUL
+#define NIG_REG_RX_TC0_PRIORITY_MASK 0x501becUL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 0x1f0540UL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 0x1f0534UL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 0x1f053cUL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 0x1f0530UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_STRICT 0x1f0514UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ 0x1f0518UL
+#define BRB_REG_TOTAL_MAC_SIZE 0x3408c0UL
+#define BRB_REG_SHARED_HR_AREA 0x340880UL
+#define BRB_REG_TC_GUARANTIED_0 0x340900UL
+#define BRB_REG_MAIN_TC_GUARANTIED_HYST_0 0x340978UL
+#define BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 0x340c60UL
+#define BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 0x340d38UL
+#define BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 0x340ab0UL
+#define BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 0x340b88UL
+#define BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 0x340c00UL
+#define BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 0x340cd8UL
+#define BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 0x340a50UL
+#define BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 0x340b28UL
+/* Tunnelling registers (VXLAN / GRE / NGE).
+ * NOTE(review): PRS_REG_ENCAPSULATION_TYPE_EN, PRS_REG_OUTPUT_FORMAT_4_0
+ * and NIG_REG_ENC_TYPE_ENABLE are shared across tunnel types; the
+ * generated list repeated them once per tunnel type with identical
+ * values.  They are now defined exactly once; values are unchanged.
+ * Hunk line counts must be regenerated when re-rolling this patch.
+ */
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
+#define NIG_REG_PKT_PRIORITY_TO_TC 0x501ba4UL
+#define PGLUE_B_REG_START_INIT_PTT_GTT 0x2a8008UL
+#define PGLUE_B_REG_INIT_DONE_PTT_GTT 0x2a800cUL
+#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL
+#define MCP_REG_CPU_STATE 0xe05004UL
+#define MCP_REG_CPU_MODE 0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT (0x1 << 10)
+#define MCP_REG_CPU_EVENT_MASK 0xe05008UL
+#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL
+#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL
+#define PSWHST_REG_VF_DISABLED_ERROR_DATA 0x2a005cUL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID 0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS 0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA 0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH 0x2a006cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID 0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 0x050050UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL 0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS 0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 0x2aae80UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL
+#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1 << 10)
+#define DORQ_REG_DB_DROP_REASON 0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL
+#define TM_REG_INT_STS_1 0x2c0190UL
+#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1 << 6)
+#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_MASK_1 0x2c0194UL
+#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1 << 6)
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL
+#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL
+#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL
+#define YSEM_REG_FAST_MEMORY 0x1540000UL
+#define NIG_REG_FLOWCTRL_MODE 0x501ba0UL
+#define TSEM_REG_FAST_MEMORY 0x1740000UL
+#define TSEM_REG_DBG_FRAME_MODE 0x1701408UL
+#define TSEM_REG_SLOW_DBG_ACTIVE 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE 0x1701404UL
+#define TSEM_REG_DBG_MODE1_CFG 0x1701420UL
+#define TSEM_REG_SYNC_DBG_EMPTY 0x1701160UL
+#define TSEM_REG_SLOW_DBG_EMPTY 0x1701140UL
+#define TCM_REG_CTX_RBC_ACCS 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX 0x11814c4UL
+#define TCM_REG_SM_CON_CTX 0x11814ccUL
+#define TCM_REG_AGG_TASK_CTX 0x11814c8UL
+#define TCM_REG_SM_TASK_CTX 0x11814d0UL
+#define MSEM_REG_FAST_MEMORY 0x1840000UL
+#define MSEM_REG_DBG_FRAME_MODE 0x1801408UL
+#define MSEM_REG_SLOW_DBG_ACTIVE 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE 0x1801404UL
+#define MSEM_REG_DBG_MODE1_CFG 0x1801420UL
+#define MSEM_REG_SYNC_DBG_EMPTY 0x1801160UL
+#define MSEM_REG_SLOW_DBG_EMPTY 0x1801140UL
+#define MCM_REG_CTX_RBC_ACCS 0x1201800UL
+#define MCM_REG_AGG_CON_CTX 0x1201804UL
+#define MCM_REG_SM_CON_CTX 0x120180cUL
+#define MCM_REG_AGG_TASK_CTX 0x1201808UL
+#define MCM_REG_SM_TASK_CTX 0x1201810UL
+#define USEM_REG_FAST_MEMORY 0x1940000UL
+#define USEM_REG_DBG_FRAME_MODE 0x1901408UL
+#define USEM_REG_SLOW_DBG_ACTIVE 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE 0x1901404UL
+#define USEM_REG_DBG_MODE1_CFG 0x1901420UL
+#define USEM_REG_SYNC_DBG_EMPTY 0x1901160UL
+#define USEM_REG_SLOW_DBG_EMPTY 0x1901140UL
+#define UCM_REG_CTX_RBC_ACCS 0x1281700UL
+#define UCM_REG_AGG_CON_CTX 0x1281704UL
+#define UCM_REG_SM_CON_CTX 0x128170cUL
+#define UCM_REG_AGG_TASK_CTX 0x1281708UL
+#define UCM_REG_SM_TASK_CTX 0x1281710UL
+#define XSEM_REG_FAST_MEMORY 0x1440000UL
+#define XSEM_REG_DBG_FRAME_MODE 0x1401408UL
+#define XSEM_REG_SLOW_DBG_ACTIVE 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE 0x1401404UL
+#define XSEM_REG_DBG_MODE1_CFG 0x1401420UL
+#define XSEM_REG_SYNC_DBG_EMPTY 0x1401160UL
+#define XSEM_REG_SLOW_DBG_EMPTY 0x1401140UL
+#define XCM_REG_CTX_RBC_ACCS 0x1001800UL
+#define XCM_REG_AGG_CON_CTX 0x1001804UL
+#define XCM_REG_SM_CON_CTX 0x1001808UL
+#define YSEM_REG_DBG_FRAME_MODE 0x1501408UL
+#define YSEM_REG_SLOW_DBG_ACTIVE 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE 0x1501404UL
+#define YSEM_REG_DBG_MODE1_CFG 0x1501420UL
+#define YSEM_REG_SYNC_DBG_EMPTY 0x1501160UL
+/* Register address map (GRC offsets) for the ecore base driver.
+ * NOTE(review): duplicate #define lines (identical name and value) were
+ * removed; identical redefinition is legal C but added noise to the list.
+ */
+/* CM context access registers */
+#define YCM_REG_CTX_RBC_ACCS 0x1081800UL
+#define YCM_REG_AGG_CON_CTX 0x1081804UL
+#define YCM_REG_SM_CON_CTX 0x108180cUL
+#define YCM_REG_AGG_TASK_CTX 0x1081808UL
+#define YCM_REG_SM_TASK_CTX 0x1081810UL
+#define PSEM_REG_FAST_MEMORY 0x1640000UL
+#define PSEM_REG_DBG_FRAME_MODE 0x1601408UL
+#define PSEM_REG_SLOW_DBG_ACTIVE 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE 0x1601404UL
+#define PSEM_REG_DBG_MODE1_CFG 0x1601420UL
+#define PSEM_REG_SYNC_DBG_EMPTY 0x1601160UL
+#define PSEM_REG_SLOW_DBG_EMPTY 0x1601140UL
+#define PCM_REG_CTX_RBC_ACCS 0x1101440UL
+#define PCM_REG_SM_CON_CTX 0x1101444UL
+/* Per-block debug bus registers (DBG_SELECT / DBG_DWORD_ENABLE / DBG_SHIFT /
+ * DBG_FORCE_VALID / DBG_FORCE_FRAME); some blocks expose only a subset.
+ */
+#define GRC_REG_DBG_SELECT 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE 0x0500a8UL
+#define GRC_REG_DBG_SHIFT 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME 0x0500b4UL
+#define PGLUE_B_REG_DBG_SELECT 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME 0x2a8410UL
+#define CNIG_REG_DBG_SELECT_K2 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 0x218264UL
+#define NCSI_REG_DBG_SELECT 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE 0x040478UL
+#define NCSI_REG_DBG_SHIFT 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME 0x040484UL
+#define BMB_REG_DBG_SELECT 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE 0x540a80UL
+#define BMB_REG_DBG_SHIFT 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME 0x540a8cUL
+#define PCIE_REG_DBG_SELECT 0x0547e8UL
+#define PHY_PCIE_REG_DBG_SELECT 0x629fe8UL
+#define PCIE_REG_DBG_DWORD_ENABLE 0x0547ecUL
+#define PHY_PCIE_REG_DBG_DWORD_ENABLE 0x629fecUL
+#define PCIE_REG_DBG_SHIFT 0x0547f0UL
+#define PHY_PCIE_REG_DBG_SHIFT 0x629ff0UL
+#define PCIE_REG_DBG_FORCE_VALID 0x0547f4UL
+#define PHY_PCIE_REG_DBG_FORCE_VALID 0x629ff4UL
+#define PCIE_REG_DBG_FORCE_FRAME 0x0547f8UL
+#define PHY_PCIE_REG_DBG_FORCE_FRAME 0x629ff8UL
+#define MCP2_REG_DBG_SELECT 0x052400UL
+#define MCP2_REG_DBG_SHIFT 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME 0x052444UL
+#define PSWHST_REG_DBG_SELECT 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME 0x2a0110UL
+#define PSWHST2_REG_DBG_SELECT 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME 0x29e068UL
+#define PSWRD_REG_DBG_DWORD_ENABLE 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME 0x29d410UL
+#define PSWWR_REG_DBG_SELECT 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME 0x29a094UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME 0x280030UL
+#define PSWRQ2_REG_DBG_SELECT 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME 0x240110UL
+#define PGLCS_REG_DBG_SELECT 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME 0x001d24UL
+#define PTU_REG_DBG_SELECT 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE 0x560104UL
+#define PTU_REG_DBG_SHIFT 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME 0x560110UL
+#define DMAE_REG_DBG_SELECT 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE 0x00c514UL
+#define DMAE_REG_DBG_SHIFT 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME 0x00c520UL
+#define TCM_REG_DBG_SELECT 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE 0x1180044UL
+#define TCM_REG_DBG_SHIFT 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME 0x1180050UL
+#define MCM_REG_DBG_SELECT 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE 0x1200044UL
+#define MCM_REG_DBG_SHIFT 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME 0x1200050UL
+#define UCM_REG_DBG_SELECT 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE 0x1280054UL
+#define UCM_REG_DBG_SHIFT 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME 0x1280060UL
+#define XCM_REG_DBG_SELECT 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE 0x1000044UL
+#define XCM_REG_DBG_SHIFT 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME 0x1000050UL
+#define YCM_REG_DBG_SELECT 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE 0x1080044UL
+#define YCM_REG_DBG_SHIFT 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME 0x1080050UL
+#define PCM_REG_DBG_SELECT 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE 0x1100044UL
+#define PCM_REG_DBG_SHIFT 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME 0x1100050UL
+#define QM_REG_DBG_SELECT 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE 0x2f2e78UL
+#define QM_REG_DBG_SHIFT 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME 0x2f2e84UL
+#define TM_REG_DBG_SELECT 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE 0x2c07acUL
+#define TM_REG_DBG_SHIFT 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME 0x2c07b8UL
+#define DORQ_REG_DBG_SELECT 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME 0x100ae0UL
+#define BRB_REG_DBG_SELECT 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE 0x340ed4UL
+#define BRB_REG_DBG_SHIFT 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME 0x340ee0UL
+#define SRC_REG_DBG_SELECT 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE 0x238704UL
+#define SRC_REG_DBG_SHIFT 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME 0x238710UL
+#define PRS_REG_DBG_SELECT 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME 0x1f0ba4UL
+#define TSDM_REG_DBG_SELECT 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME 0xfc0e38UL
+#define USDM_REG_DBG_SELECT 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME 0xfd0e38UL
+#define XSDM_REG_DBG_SELECT 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME 0xf80e38UL
+#define YSDM_REG_DBG_SELECT 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME 0xf90e38UL
+#define PSDM_REG_DBG_SELECT 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME 0xfa0e38UL
+#define TSEM_REG_DBG_SELECT 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE 0x170152cUL
+#define TSEM_REG_DBG_SHIFT 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME 0x1701538UL
+#define MSEM_REG_DBG_SELECT 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE 0x180152cUL
+#define MSEM_REG_DBG_SHIFT 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME 0x1801538UL
+#define USEM_REG_DBG_SELECT 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE 0x190152cUL
+#define USEM_REG_DBG_SHIFT 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME 0x1901538UL
+#define XSEM_REG_DBG_SELECT 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE 0x140152cUL
+#define XSEM_REG_DBG_SHIFT 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME 0x1401538UL
+#define YSEM_REG_DBG_SELECT 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE 0x150152cUL
+#define YSEM_REG_DBG_SHIFT 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME 0x1501538UL
+#define PSEM_REG_DBG_SELECT 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE 0x160152cUL
+#define PSEM_REG_DBG_SHIFT 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME 0x1601538UL
+#define RSS_REG_DBG_SELECT 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE 0x238c50UL
+#define RSS_REG_DBG_SHIFT 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME 0x238c5cUL
+#define TMLD_REG_DBG_SELECT 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME 0x4d1610UL
+#define MULD_REG_DBG_SELECT 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE 0x4e1604UL
+#define MULD_REG_DBG_SHIFT 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME 0x4e1610UL
+#define YULD_REG_DBG_SELECT 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE 0x4c9604UL
+#define YULD_REG_DBG_SHIFT 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME 0x4c9610UL
+#define XYLD_REG_DBG_SELECT 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME 0x4c1610UL
+#define PRM_REG_DBG_SELECT 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE 0x2306acUL
+#define PRM_REG_DBG_SHIFT 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME 0x2306b8UL
+#define PBF_PB1_REG_DBG_SELECT 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME 0xda4738UL
+#define RPB_REG_DBG_SELECT 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE 0x23c72cUL
+#define RPB_REG_DBG_SHIFT 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME 0x23c738UL
+#define BTB_REG_DBG_SELECT 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME 0xdb08d8UL
+#define PBF_REG_DBG_SELECT 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE 0xd80064UL
+#define PBF_REG_DBG_SHIFT 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME 0xd80070UL
+#define RDIF_REG_DBG_SELECT 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE 0x300504UL
+#define RDIF_REG_DBG_SHIFT 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME 0x300510UL
+#define TDIF_REG_DBG_SELECT 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE 0x310504UL
+#define TDIF_REG_DBG_SHIFT 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME 0x310510UL
+#define CDU_REG_DBG_SELECT 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE 0x580708UL
+#define CDU_REG_DBG_SHIFT 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME 0x580714UL
+#define CCFC_REG_DBG_SELECT 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME 0x2e0510UL
+#define TCFC_REG_DBG_SELECT 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME 0x2d0510UL
+#define IGU_REG_DBG_SELECT 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE 0x18157cUL
+#define IGU_REG_DBG_SHIFT 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME 0x181588UL
+#define CAU_REG_DBG_SELECT 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME 0x1c0eb8UL
+#define UMAC_REG_DBG_SELECT 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE 0x051098UL
+#define UMAC_REG_DBG_SHIFT 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME 0x0510a4UL
+#define NIG_REG_DBG_SELECT 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE 0x502144UL
+#define NIG_REG_DBG_SHIFT 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME 0x502150UL
+#define WOL_REG_DBG_SELECT 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE 0x600144UL
+#define WOL_REG_DBG_SHIFT 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME 0x600150UL
+#define BMBN_REG_DBG_SELECT 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE 0x610144UL
+#define BMBN_REG_DBG_SHIFT 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME 0x610150UL
+#define NWM_REG_DBG_SELECT 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE 0x8000f0UL
+#define NWM_REG_DBG_SHIFT 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME 0x8000fcUL
+/* Big-RAM access registers */
+#define BRB_REG_BIG_RAM_ADDRESS 0x340800UL
+#define BRB_REG_BIG_RAM_DATA 0x341500UL
+#define BTB_REG_BIG_RAM_ADDRESS 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS 0x540800UL
+#define BMB_REG_BIG_RAM_DATA 0x540f00UL
+/* Reset registers */
+#define MISCS_REG_RESET_PL_UA 0x009050UL
+#define MISC_REG_RESET_PL_UA 0x008050UL
+#define MISC_REG_RESET_PL_HV 0x008060UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 0x008080UL
+/* SEM fast-memory and DBG recorder registers */
+#define SEM_FAST_REG_INT_RAM 0x020000UL
+#define DBG_REG_DBG_BLOCK_ON 0x010454UL
+#define DBG_REG_FRAMING_MODE 0x010058UL
+#define SEM_FAST_REG_DEBUG_MODE 0x000744UL
+#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL
+#define SEM_FAST_REG_FILTER_CID 0x000754UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_STRT 0x000760UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_END 0x000764UL
+#define SEM_FAST_REG_FILTER_EVENT_ID 0x000758UL
+#define SEM_FAST_REG_EVENT_ID_MASK 0x00075cUL
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL
+#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL
+#define DBG_REG_FILTER_ENABLE 0x0109d0UL
+#define DBG_REG_TRIGGER_ENABLE 0x01054cUL
+#define DBG_REG_FILTER_CNSTR_OPRTN_0 0x010a28UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0 0x01071cUL
+#define DBG_REG_FILTER_CNSTR_DATA_0 0x0109d8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0 0x01059cUL
+#define DBG_REG_FILTER_CNSTR_DATA_MASK_0 0x0109f8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0 0x01065cUL
+#define DBG_REG_FILTER_CNSTR_FRAME_0 0x0109e8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0 0x0105fcUL
+#define DBG_REG_FILTER_CNSTR_FRAME_MASK_0 0x010a08UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0 0x0106bcUL
+#define DBG_REG_FILTER_CNSTR_OFFSET_0 0x010a18UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0 0x0107dcUL
+#define DBG_REG_FILTER_CNSTR_RANGE_0 0x010a38UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0 0x01077cUL
+#define DBG_REG_FILTER_CNSTR_CYCLIC_0 0x010a68UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0 0x0108fcUL
+#define DBG_REG_FILTER_CNSTR_MUST_0 0x010a48UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0 0x01083cUL
+#define DBG_REG_INTR_BUFFER 0x014000UL
+#define DBG_REG_INTR_BUFFER_WR_PTR 0x010404UL
+#define DBG_REG_WRAP_ON_INT_BUFFER 0x010418UL
+#define DBG_REG_INTR_BUFFER_RD_PTR 0x010400UL
+#define DBG_REG_EXT_BUFFER_WR_PTR 0x010410UL
+#define DBG_REG_WRAP_ON_EXT_BUFFER 0x01041cUL
+#define SEM_FAST_REG_STALL_0 0x000488UL
+#define SEM_FAST_REG_STALLED 0x000494UL
+#define SEM_FAST_REG_STORM_REG_FILE 0x008000UL
+#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL
+#define RSS_REG_RSS_RAM_ADDR 0x238c30UL
+#define RSS_REG_RSS_RAM_DATA 0x238c20UL
+#define MISCS_REG_BLOCK_256B_EN 0x009074UL
+#define MCP_REG_CPU_REG_FILE 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE 32
+#define DBG_REG_CALENDAR_OUT_DATA 0x010480UL
+#define DBG_REG_FULL_MODE 0x010060UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB 0x010430UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB 0x010434UL
+#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL
+#define DBG_REG_PCI_EXT_BUFFER_SIZE 0x010438UL
+#define DBG_REG_PCI_FUNC_NUM 0x010a98UL
+#define DBG_REG_PCI_LOGIC_ADDR 0x010460UL
+#define DBG_REG_PCI_REQ_CREDIT 0x010440UL
+#define DBG_REG_DEBUG_TARGET 0x01005cUL
+#define DBG_REG_OUTPUT_ENABLE 0x01000cUL
+#define DBG_REG_OTHER_ENGINE_MODE 0x010010UL
+#define NIG_REG_DEBUG_PORT 0x5020d0UL
+#define DBG_REG_ETHERNET_HDR_WIDTH 0x010b38UL
+#define DBG_REG_ETHERNET_HDR_7 0x010b34UL
+#define DBG_REG_ETHERNET_HDR_6 0x010b30UL
+#define DBG_REG_ETHERNET_HDR_5 0x010b2cUL
+#define DBG_REG_ETHERNET_HDR_4 0x010b28UL
+#define DBG_REG_NIG_DATA_LIMIT_SIZE 0x01043cUL
+#define DBG_REG_TIMESTAMP_FRAME_EN 0x010b54UL
+#define DBG_REG_TIMESTAMP_TICK 0x010b50UL
+#define DBG_REG_FILTER_ID_NUM 0x0109d4UL
+#define DBG_REG_FILTER_MSG_LENGTH_ENABLE 0x010a78UL
+#define DBG_REG_FILTER_MSG_LENGTH 0x010a7cUL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS 0x010a90UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES 0x010a94UL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE 0x010a88UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE 0x010a8cUL
+#define DBG_REG_TRIGGER_STATE_ID_0 0x010554UL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 0x01095cUL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 0x010968UL
+#define DBG_REG_TRIGGER_STATE_SET_COUNT_0 0x010584UL
+#define DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 0x01056cUL
+#define DBG_REG_NO_GRANT_ON_FULL 0x010458UL
+#define DBG_REG_STORM_ID_NUM 0x010b14UL
+#define DBG_REG_CALENDAR_SLOT0 0x010014UL
+#define DBG_REG_HW_ID_NUM 0x010b10UL
+#define DBG_REG_TIMESTAMP 0x010b4cUL
+#define DBG_REG_CPU_TIMEOUT 0x010450UL
+#define DBG_REG_TRIGGER_STATUS_CUR_STATE 0x010b60UL
+/* Miscellaneous debug / error-handling registers */
+#define GRC_REG_TRACE_FIFO_VALID_DATA 0x050064UL
+#define GRC_REG_TRACE_FIFO 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x181530UL
+#define IGU_REG_ERROR_HANDLING_MEMORY 0x181520UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW 0x050500UL
+#define TSEM_REG_VF_ERROR 0x1700408UL
+#define USEM_REG_VF_ERROR 0x1900408UL
+#define MSEM_REG_VF_ERROR 0x1800408UL
+#define XSEM_REG_VF_ERROR 0x1400408UL
+#define YSEM_REG_VF_ERROR 0x1500408UL
+#define PSEM_REG_VF_ERROR 0x1600408UL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x2aa118UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT 0x180408UL
+#define IGU_REG_VF_CONFIGURATION 0x180804UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE 0x2a0800UL
+#define DORQ_REG_VF_USAGE_CNT 0x1009c4UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 0xd806ccUL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 0xd806c8UL
+#define PRS_REG_MSG_CT_MAIN_0 0x1f0a24UL
+#define PRS_REG_MSG_CT_LB_0 0x1f0a28UL
+#define BRB_REG_PER_TC_COUNTERS 0x341a00UL
+
+/* Additional registers, appended beyond the generated list above */
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define MISCS_REG_FUNCTION_HIDE 0x0096f0UL
+#define PCIE_REG_PRTY_MASK 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE 0x2aaeb4UL
+#define BAR0_MAP_REG_YSDM_RAM 0x1e80000UL
+#define SEM_FAST_REG_INT_RAM_SIZE 20480
+#define MCP_REG_SCRATCH_SIZE 57344
+
+/* CDU segment-parameter field shifts (bit positions, not GRC offsets) */
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 16
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS 0x100a1cUL