net/qede/base: add support for new firmware
authorRasesh Mody <rasesh.mody@cavium.com>
Thu, 5 Jan 2017 07:04:04 +0000 (23:04 -0800)
committerFerruh Yigit <ferruh.yigit@intel.com>
Tue, 17 Jan 2017 18:40:53 +0000 (19:40 +0100)
Add support for 8.14.x.x firmware.

The new firmware adds support for external PHY BCM8485x; configures
fixed link speed with transceiver/cable not supporting negotiation;
supports engine swap; supports overriding PCIe preset equalization
value; checks pause too long for ports and reads die temperature
every second for shutdown threshold.
It includes a change in the FLR flow when there is a SW-initiated FLR.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
22 files changed:
doc/guides/nics/qede.rst
drivers/net/qede/base/common_hsi.h
drivers/net/qede/base/ecore.h
drivers/net/qede/base/ecore_dcbx.c
drivers/net/qede/base/ecore_dcbx.h
drivers/net/qede/base/ecore_dev.c
drivers/net/qede/base/ecore_gtt_reg_addr.h
drivers/net/qede/base/ecore_hsi_common.h
drivers/net/qede/base/ecore_hsi_debug_tools.h
drivers/net/qede/base/ecore_hsi_eth.h
drivers/net/qede/base/ecore_hsi_init_tool.h
drivers/net/qede/base/ecore_init_fw_funcs.c
drivers/net/qede/base/ecore_init_ops.c
drivers/net/qede/base/ecore_iov_api.h
drivers/net/qede/base/ecore_iro_values.h
drivers/net/qede/base/ecore_mcp.c
drivers/net/qede/base/ecore_sp_commands.c
drivers/net/qede/base/ecore_spq.c
drivers/net/qede/base/ecore_sriov.c
drivers/net/qede/base/eth_common.h
drivers/net/qede/base/nvm_cfg.h
drivers/net/qede/qede_main.c

index 46ae2be..1cf5501 100644 (file)
@@ -77,10 +77,10 @@ Supported QLogic Adapters
 Prerequisites
 -------------
 
-- Requires firmware version **8.10.x.** and management firmware
-  version **8.10.x or higher**. Firmware may be available
+- Requires firmware version **8.14.x.** and management firmware
+  version **8.14.x or higher**. Firmware may be available
   inbox in certain newer Linux distros under the standard directory
-  ``E.g. /lib/firmware/qed/qed_init_values-8.10.9.0.bin``
+  ``E.g. /lib/firmware/qed/qed_init_values-8.14.6.0.bin``
 
 - If the required firmware files are not available then visit
   `QLogic Driver Download Center <http://driverdownloads.qlogic.com>`_.
@@ -119,7 +119,7 @@ enabling debugging options may affect system performance.
 - ``CONFIG_RTE_LIBRTE_QEDE_FW`` (default **""**)
 
   Gives absolute path of firmware file.
-  ``Eg: "/lib/firmware/qed/qed_init_values_zipped-8.10.9.0.bin"``
+  ``Eg: "/lib/firmware/qed/qed_init_values_zipped-8.14.6.0.bin"``
   Empty string indicates driver will pick up the firmware file
   from the default location.
 
index b431c78..4083e86 100644 (file)
@@ -89,8 +89,8 @@
 
 
 #define FW_MAJOR_VERSION               8
-#define FW_MINOR_VERSION               10
-#define FW_REVISION_VERSION            9
+#define FW_MINOR_VERSION               14
+#define FW_REVISION_VERSION            6
 #define FW_ENGINEERING_VERSION 0
 
 /***********************/
@@ -726,8 +726,6 @@ union event_ring_data {
        struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
        struct initial_cleanup_eqe_data vf_init_cleanup
            /* VF Initial Cleanup data */;
-/* Host handle for the Async Completions */
-       struct regpair iwarp_handle;
 };
 /* Event Ring Entry */
 struct event_ring_entry {
index 034e885..b1b0a2e 100644 (file)
@@ -765,15 +765,6 @@ struct ecore_dev {
 #define NUM_OF_ENG_PFS(dev)    (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
                                                  : MAX_NUM_PFS_K2)
 
-#ifndef REAL_ASIC_ONLY
-#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
-       (ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
-       (ECORE_PATH_ID(p_hwfn) == 1) && \
-       ((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
-        (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
-        (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
-#endif
-
 /**
  * @brief ecore_concrete_to_sw_fid - get the sw function id from
  *        the concrete value.
index 8175619..5932948 100644 (file)
@@ -13,6 +13,7 @@
 #include "ecore_cxt.h"
 #include "ecore_gtt_reg_addr.h"
 #include "ecore_iro.h"
+#include "ecore_iov_api.h"
 
 #define ECORE_DCBX_MAX_MIB_READ_TRY    (100)
 #define ECORE_ETH_TYPE_DEFAULT         (0)
@@ -79,21 +80,6 @@ static bool ecore_dcbx_local(u32 dcbx_cfg_bitmap)
                DCBX_CONFIG_VERSION_STATIC) ? true : false;
 }
 
-/* @@@TBD A0 Eagle workaround */
-void ecore_dcbx_eagle_workaround(struct ecore_hwfn *p_hwfn,
-                                struct ecore_ptt *p_ptt, bool set_to_pfc)
-{
-       if (!ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
-               return;
-
-       ecore_wr(p_hwfn, p_ptt,
-                YSEM_REG_FAST_MEMORY + 0x20000 /* RAM in FASTMEM */  +
-                YSTORM_FLOW_CONTROL_MODE_OFFSET,
-                set_to_pfc ? flow_ctrl_pfc : flow_ctrl_pause);
-       ecore_wr(p_hwfn, p_ptt, NIG_REG_FLOWCTRL_MODE,
-                EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE);
-}
-
 static void
 ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
                       struct ecore_dcbx_results *p_data)
@@ -945,7 +931,6 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                         * according to negotiation results
                         */
                        enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
-                       ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, enabled);
                }
        }
        ecore_dcbx_get_params(p_hwfn, p_ptt, type);
index 1518624..2ce4465 100644 (file)
@@ -56,10 +56,4 @@ void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
 void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
                                     struct pf_update_ramrod_data *p_dest);
 
-#ifndef REAL_ASIC_ONLY
-/* @@@TBD eagle phy workaround */
-void ecore_dcbx_eagle_workaround(struct ecore_hwfn *, struct ecore_ptt *,
-                                bool set_to_pfc);
-#endif
-
 #endif /* __ECORE_DCBX_H__ */
index 03620d9..15db09f 100644 (file)
@@ -711,7 +711,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
        }
 
        p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
-                                        sizeof(struct ecore_eth_stats));
+                                        sizeof(*p_dev->reset_stats));
        if (!p_dev->reset_stats) {
                DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
                goto alloc_no_mem;
@@ -823,9 +823,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
 {
        int hw_mode = 0;
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               hw_mode |= 1 << MODE_BB_A0;
-       } else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
+       if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
                hw_mode |= 1 << MODE_BB_B0;
        } else if (ECORE_IS_AH(p_hwfn->p_dev)) {
                hw_mode |= 1 << MODE_K2;
@@ -881,11 +879,6 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
 #endif
                hw_mode |= 1 << MODE_ASIC;
 
-#ifndef REAL_ASIC_ONLY
-       if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
-               hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;
-#endif
-
        if (p_hwfn->p_dev->num_hwfns > 1)
                hw_mode |= 1 << MODE_100G;
 
@@ -991,7 +984,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
        ecore_gtt_init(p_hwfn);
 
 #ifndef ASIC_ONLY
-       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+       if (CHIP_REV_IS_EMUL(p_dev)) {
                rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
                if (rc != ECORE_SUCCESS)
                        return rc;
@@ -1006,7 +999,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
        }
 
        ecore_qm_common_rt_init(p_hwfn,
-                               p_hwfn->p_dev->num_ports_in_engines,
+                               p_dev->num_ports_in_engines,
                                qm_info->max_phys_tcs_per_port,
                                qm_info->pf_rl_en, qm_info->pf_wfq_en,
                                qm_info->vport_rl_en, qm_info->vport_wfq_en,
@@ -1036,11 +1029,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
        ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
        ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
 
-       if (ECORE_IS_BB(p_hwfn->p_dev)) {
+       if (ECORE_IS_BB(p_dev)) {
                /* Workaround clears ROCE search for all functions to prevent
                 * involving non initialized function in processing ROCE packet.
                 */
-               num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+               num_pfs = NUM_OF_ENG_PFS(p_dev);
                for (pf_id = 0; pf_id < num_pfs; pf_id++) {
                        ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
                        ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
@@ -1056,8 +1049,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
         * This is not done inside the init tool since it currently can't
         * perform a pretending to VFs.
         */
-       max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2
-           : MAX_NUM_VFS_BB;
+       max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
        for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
                concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
                ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
@@ -1536,7 +1528,9 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
                return rc;
        if (b_hw_start) {
                /* enable interrupts */
-               ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+               rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
 
                /* send function start command */
                rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
@@ -1627,7 +1621,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 {
        enum _ecore_status_t rc, mfw_rc;
        u32 load_code, param;
-       int i, j;
+       int i;
 
        if (p_params->int_mode == ECORE_INT_MODE_MSI && p_dev->num_hwfns > 1) {
                DP_NOTICE(p_dev, false,
@@ -1711,25 +1705,6 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                                                p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;
-
-#ifndef REAL_ASIC_ONLY
-                       if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
-                               struct init_nig_pri_tc_map_req tc_map;
-
-                               OSAL_MEM_ZERO(&tc_map, sizeof(tc_map));
-
-                               /* remove this once flow control is
-                                * implemented
-                                */
-                               for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) {
-                                       tc_map.pri[j].tc_id = 0;
-                                       tc_map.pri[j].valid = 1;
-                               }
-                               ecore_init_nig_pri_tc_map(p_hwfn,
-                                                         p_hwfn->p_main_ptt,
-                                                         &tc_map);
-                       }
-#endif
                        /* Fall into */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
@@ -1802,13 +1777,14 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
                 */
                OSAL_MSLEEP(1);
        }
-       if (i == ECORE_HW_STOP_RETRY_LIMIT)
-               DP_NOTICE(p_hwfn, true,
-                         "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
-                         (u8)ecore_rd(p_hwfn, p_ptt,
-                                       TM_REG_PF_SCAN_ACTIVE_CONN),
-                         (u8)ecore_rd(p_hwfn, p_ptt,
-                                       TM_REG_PF_SCAN_ACTIVE_TASK));
+
+       if (i < ECORE_HW_STOP_RETRY_LIMIT)
+               return;
+
+       DP_NOTICE(p_hwfn, true, "Timers linear scans are not over"
+                 " [Connection %02x Tasks %02x]\n",
+                 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+                 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
 }
 
 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
@@ -3127,7 +3103,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
                }
        }
 
-       return ECORE_SUCCESS;
+       return rc;
 }
 
 void ecore_hw_remove(struct ecore_dev *p_dev)
@@ -3819,8 +3795,8 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
                return ECORE_INVAL;
        }
 
-       OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
        p_coal_timeset = p_eth_qzone;
+       OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
        SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
        SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
        ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
index 6395b7c..070588d 100644 (file)
 #define GTT_REG_ADDR_H
 
 /* Win 2 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_IGU_CMD                                      0x00f000UL
 
 /* Win 3 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_TSDM_RAM                                     0x010000UL
 
 /* Win 4 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_MSDM_RAM                                     0x011000UL
 
 /* Win 5 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_MSDM_RAM_1024                                0x012000UL
 
 /* Win 6 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_USDM_RAM                                     0x013000UL
 
 /* Win 7 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_USDM_RAM_1024                                0x014000UL
 
 /* Win 8 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_USDM_RAM_2048                                0x015000UL
 
 /* Win 9 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_XSDM_RAM                                     0x016000UL
 
 /* Win 10 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_YSDM_RAM                                     0x017000UL
 
 /* Win 11 */
-/* Access:RW   DataWidth:0x20    Chips: BB_A0 BB_B0 K2 */
+/* Access:RW   DataWidth:0x20    Chips: BB_B0 K2 E5 */
 #define GTT_BAR0_MAP_REG_PSDM_RAM                                     0x018000UL
 
 #endif
index 179d410..6ddbe1a 100644 (file)
@@ -660,6 +660,7 @@ enum core_event_opcode {
        CORE_EVENT_TX_QUEUE_STOP,
        CORE_EVENT_RX_QUEUE_START,
        CORE_EVENT_RX_QUEUE_STOP,
+       CORE_EVENT_RX_QUEUE_FLUSH,
        MAX_CORE_EVENT_OPCODE
 };
 
@@ -743,6 +744,7 @@ enum core_ramrod_cmd_id {
        CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
        CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
        CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+       CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
        MAX_CORE_RAMROD_CMD_ID
 };
 
@@ -860,7 +862,8 @@ struct core_rx_slow_path_cqe {
        u8 type /* CQE type */;
        u8 ramrod_cmd_id;
        __le16 echo;
-       __le32 reserved1[7];
+       struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+       __le32 reserved1[5];
 };
 
 /*
@@ -926,36 +929,51 @@ struct core_rx_stop_ramrod_data {
 /*
  * Flags for Core TX BD
  */
-struct core_tx_bd_flags {
-       u8 as_bitfield;
+struct core_tx_bd_data {
+       __le16 as_bitfield;
 /* Do not allow additional VLAN manipulations on this packet (DCB) */
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK      0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT     0
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK      0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
 /* Insert VLAN into packet */
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK       0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT      1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK       0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
 /* This is the first BD of the packet (for debug) */
-#define CORE_TX_BD_FLAGS_START_BD_MASK             0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT            2
+#define CORE_TX_BD_DATA_START_BD_MASK             0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT            2
 /* Calculate the IP checksum for the packet */
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK              0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT             3
+#define CORE_TX_BD_DATA_IP_CSUM_MASK              0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
 /* Calculate the L4 checksum for the packet */
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK              0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT             4
+#define CORE_TX_BD_DATA_L4_CSUM_MASK              0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
 /* Packet is IPv6 with extensions */
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK             0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT            5
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK             0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
 /* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol:
  * 0-TCP, 1-UDP
  */
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK          0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK          0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
 /* The pseudo checksum mode to place in the L4 checksum field. Required only
- *  when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
+ * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
+ */
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK  0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+/* Number of BDs that make up one packet - width wide enough to present
+ * CORE_LL2_TX_MAX_BDS_PER_PACKET
+ */
+#define CORE_TX_BD_DATA_NBDS_MASK                 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT                8
+/* Use roce_flavor enum - Differentiate between Roce flavors is valid when
+ * connType is ROCE (use enum core_roce_flavor_type)
  */
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK  0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK            0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
+/* Calculate ip length */
+#define CORE_TX_BD_DATA_IP_LEN_MASK               0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
+#define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
 };
 
 /*
@@ -968,28 +986,18 @@ struct core_tx_bd {
  * packets: echo data to pass to Rx
  */
        __le16 nw_vlan_or_lb_echo;
-       u8 bitfield0;
-/* Number of BDs that make up one packet - width wide enough to present
- * X_CORE_LL2_NUM_OF_BDS_ON_ST_CT
- */
-#define CORE_TX_BD_NBDS_MASK             0xF
-#define CORE_TX_BD_NBDS_SHIFT            0
-/* Use roce_flavor enum - Diffrentiate between Roce flavors is valid when
- * connType is ROCE (use enum core_roce_flavor_type)
- */
-#define CORE_TX_BD_ROCE_FLAV_MASK        0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT       4
-#define CORE_TX_BD_RESERVED0_MASK        0x7
-#define CORE_TX_BD_RESERVED0_SHIFT       5
-       struct core_tx_bd_flags bd_flags /* BD Flags */;
+       struct core_tx_bd_data bd_data /* BD Flags */;
        __le16 bitfield1;
+/* L4 Header Offset from start of packet (in Words). This is needed if both
+ * l4_csum and ipv6_ext are set
+ */
 #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK  0x3FFF
 #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
 /* Packet destination - Network, LB (use enum core_tx_dest) */
 #define CORE_TX_BD_TX_DST_MASK           0x1
 #define CORE_TX_BD_TX_DST_SHIFT          14
-#define CORE_TX_BD_RESERVED1_MASK        0x1
-#define CORE_TX_BD_RESERVED1_SHIFT       15
+#define CORE_TX_BD_RESERVED_MASK         0x1
+#define CORE_TX_BD_RESERVED_SHIFT        15
 };
 
 
@@ -1265,6 +1273,7 @@ enum malicious_vf_error_id {
 /* Tunneled packet with IPv6+Ext without a proper number of BDs */
        ETH_TUNN_IPV6_EXT_NBD_ERR,
        ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
+       ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
        MAX_MALICIOUS_VF_ERROR_ID
 };
 
index e82b0d4..effb6ed 100644 (file)
@@ -92,6 +92,11 @@ enum block_addr {
        GRCBASE_MS = 0x6a0000,
        GRCBASE_PHY_PCIE = 0x620000,
        GRCBASE_LED = 0x6b8000,
+       GRCBASE_AVS_WRAP = 0x6b0000,
+       GRCBASE_RGFS = 0x19d0000,
+       GRCBASE_TGFS = 0x19e0000,
+       GRCBASE_PTLD = 0x19f0000,
+       GRCBASE_YPLD = 0x1a10000,
        GRCBASE_MISC_AEU = 0x8000,
        GRCBASE_BAR0_MAP = 0x1c00000,
        MAX_BLOCK_ADDR
@@ -177,6 +182,11 @@ enum block_id {
        BLOCK_MS,
        BLOCK_PHY_PCIE,
        BLOCK_LED,
+       BLOCK_AVS_WRAP,
+       BLOCK_RGFS,
+       BLOCK_TGFS,
+       BLOCK_PTLD,
+       BLOCK_YPLD,
        BLOCK_MISC_AEU,
        BLOCK_BAR0_MAP,
        MAX_BLOCK_ID
@@ -708,7 +718,7 @@ struct dbg_bus_data {
        struct dbg_bus_pci_buf_data pci_buf;
        __le16 reserved;
 /* Debug Bus data for each block */
-       struct dbg_bus_block_data blocks[80];
+       struct dbg_bus_block_data blocks[88];
 /* Debug Bus data for each block */
        struct dbg_bus_storm_data storms[6];
 };
@@ -846,12 +856,12 @@ enum dbg_bus_targets {
  * GRC Dump data
  */
 struct dbg_grc_data {
+/* Indicates if the GRC parameters were initialized */
+       u8 params_initialized;
+       u8 reserved1;
+       __le16 reserved2;
 /* Value of each GRC parameter. Array size must match enum dbg_grc_params. */
-       __le32 param_val[40];
-/* Indicates for each GRC parameter if it was set by the user (0/1).
- * Array size must match the enum dbg_grc_params.
- */
-       u8 param_set_by_user[40];
+       __le32 param_val[48];
 };
 
 
@@ -901,6 +911,8 @@ enum dbg_grc_params {
        DBG_GRC_PARAM_PARITY_SAFE,
        DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
        DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */,
+       DBG_GRC_PARAM_NO_MCP /* don't perform MCP commands (0/1) */,
+       DBG_GRC_PARAM_NO_FW_VER /* don't read FW/MFW version (0/1) */,
        MAX_DBG_GRC_PARAMS
 };
 
@@ -1014,7 +1026,7 @@ struct dbg_tools_data {
        struct idle_chk_data idle_chk /* Idle Check data */;
        u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */;
 /* Indicates if a block is in reset state (0/1) */
-       u8 block_in_reset[80];
+       u8 block_in_reset[88];
        u8 chip_id /* Chip ID (from enum chip_ids) */;
        u8 platform_id /* Platform ID (from enum platform_ids) */;
        u8 initialized /* Indicates if the data was initialized */;
index e26c183..e8373d7 100644 (file)
@@ -1446,7 +1446,15 @@ struct vport_update_ramrod_data_cmn {
 /* If set, MTU will be updated. Vport must be not active. */
        u8 update_mtu_flg;
        __le16 mtu /* New MTU value. Used if update_mtu_flg are set */;
-       u8 reserved[2];
+/* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be
+ * updated
+ */
+       u8 update_ctl_frame_checks_en_flg;
+/* If set, Control frames will be filtered according to MAC check. */
+       u8 ctl_frame_mac_check_en;
+/* If set, Control frames will be filtered according to ethtype check. */
+       u8 ctl_frame_ethtype_check_en;
+       u8 reserved[15];
 };
 
 struct vport_update_ramrod_mcast {
index 410b0bc..d07549c 100644 (file)
 /* Max size in dwords of a zipped array */
 #define MAX_ZIPPED_SIZE                        8192
 
+enum init_modes {
+       MODE_BB_A0_DEPRECATED,
+       MODE_BB_B0,
+       MODE_K2,
+       MODE_ASIC,
+       MODE_EMUL_REDUCED,
+       MODE_EMUL_FULL,
+       MODE_FPGA,
+       MODE_CHIPSIM,
+       MODE_SF,
+       MODE_MF_SD,
+       MODE_MF_SI,
+       MODE_PORTS_PER_ENG_1,
+       MODE_PORTS_PER_ENG_2,
+       MODE_PORTS_PER_ENG_4,
+       MODE_100G,
+       MODE_E5,
+       MAX_INIT_MODES
+};
+
+enum init_phases {
+       PHASE_ENGINE,
+       PHASE_PORT,
+       PHASE_PF,
+       PHASE_VF,
+       PHASE_QM_PF,
+       MAX_INIT_PHASES
+};
+
+enum init_split_types {
+       SPLIT_TYPE_NONE,
+       SPLIT_TYPE_PORT,
+       SPLIT_TYPE_PF,
+       SPLIT_TYPE_PORT_PF,
+       SPLIT_TYPE_VF,
+       MAX_INIT_SPLIT_TYPES
+};
 
 struct fw_asserts_ram_section {
 /* The offset of the section in the RAM in RAM lines (64-bit units) */
@@ -69,51 +106,6 @@ struct fw_info_location {
        __le32 size;
 };
 
-
-
-
-enum init_modes {
-       MODE_BB_A0,
-       MODE_BB_B0,
-       MODE_K2,
-       MODE_ASIC,
-       MODE_EMUL_REDUCED,
-       MODE_EMUL_FULL,
-       MODE_FPGA,
-       MODE_CHIPSIM,
-       MODE_SF,
-       MODE_MF_SD,
-       MODE_MF_SI,
-       MODE_PORTS_PER_ENG_1,
-       MODE_PORTS_PER_ENG_2,
-       MODE_PORTS_PER_ENG_4,
-       MODE_100G,
-       MODE_40G,
-       MODE_EAGLE_ENG1_WORKAROUND,
-       MAX_INIT_MODES
-};
-
-
-enum init_phases {
-       PHASE_ENGINE,
-       PHASE_PORT,
-       PHASE_PF,
-       PHASE_VF,
-       PHASE_QM_PF,
-       MAX_INIT_PHASES
-};
-
-
-enum init_split_types {
-       SPLIT_TYPE_NONE,
-       SPLIT_TYPE_PORT,
-       SPLIT_TYPE_PF,
-       SPLIT_TYPE_PORT_PF,
-       SPLIT_TYPE_VF,
-       MAX_INIT_SPLIT_TYPES
-};
-
-
 /*
  * Binary buffer header
  */
index e83eeb8..a5437b5 100644 (file)
@@ -176,12 +176,6 @@ static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
                                         u8 voq, u16 cmdq_lines)
 {
        u32 qm_line_crd;
-       /* In A0 - Limit the size of pbf queue so that only 511 commands
-        * with the minimum size of 4 (FCoE minimum size)
-        */
-       bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
-       if (is_bb_a0)
-               cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
        qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
        OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
                         (u32)cmdq_lines);
@@ -327,11 +321,9 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
        u16 num_pqs = num_pf_pqs + num_vf_pqs;
        u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
        u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
-       bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
        /* a bit per Tx PQ indicating if the PQ is associated with a VF */
        u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
-       u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
-       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
        u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
        u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
        u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -397,8 +389,8 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
                        /* if PQ is associated with a VF, add indication to PQ
                         * VF mask
                         */
-                       tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
-                           (1 << (pq_id % tx_pq_vf_mask_width));
+                       tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
+                               (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
                        mem_addr_4kb += vport_pq_mem_4kb;
                } else {
                        mem_addr_4kb += pq_mem_4kb;
@@ -406,23 +398,9 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
        }
        /* store Tx PQ VF mask to size select register */
        for (i = 0; i < num_tx_pq_vf_masks; i++) {
-               if (tx_pq_vf_mask[i]) {
-                       if (is_bb_a0) {
-                               /* A0-only: perform read-modify-write
-                                *(fixed in B0)
-                                */
-                               u32 curr_mask =
-                                   is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
-                                                      QM_REG_MAXPQSIZETXSEL_0
-                                                               + i * 4);
-                               STORE_RT_REG(p_hwfn,
-                                            QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
-                                            i, curr_mask | tx_pq_vf_mask[i]);
-                       } else
-                               STORE_RT_REG(p_hwfn,
-                                            QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
-                                            i, tx_pq_vf_mask[i]);
-               }
+               if (tx_pq_vf_mask[i])
+                       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+                                    i, tx_pq_vf_mask[i]);
        }
 }
 
@@ -1246,9 +1224,6 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
 void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt, u16 dest_port)
 {
-       /* geneve tunnel not supported in BB_A0 */
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
-               return;
        /* update PRS register */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
        /* update NIG register */
@@ -1262,9 +1237,6 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
                             bool eth_geneve_enable, bool ip_geneve_enable)
 {
        u32 reg_val;
-       /* geneve tunnel not supported in BB_A0 */
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
-               return;
        /* update PRS register */
        reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
@@ -1283,11 +1255,6 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
                 eth_geneve_enable ? 1 : 0);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
                 ip_geneve_enable ? 1 : 0);
-       /* comp ver */
-       reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
-       ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
-       ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
-       ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
        /* EDPM with geneve tunnel not supported in BB_B0 */
        if (ECORE_IS_BB_B0(p_hwfn->p_dev))
                return;
index 351e946..faeca68 100644 (file)
@@ -573,8 +573,7 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
                return ECORE_INVAL;
        }
 
-       /* First Dword contains metadata and should be skipped */
-       buf_hdr = (struct bin_buffer_hdr *)((uintptr_t)(data + sizeof(u32)));
+       buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
 
        offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
        fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
index 0b857bb..24a43d3 100644 (file)
@@ -663,6 +663,17 @@ bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
                                 u16 rel_vf_id);
 
+/**
+ * @brief - Return true if VF has started in FW
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+                            u16 rel_vf_id);
+
 /**
  * @brief - Get VF's vport min rate configured.
  * @param p_hwfn
index 43e01e4..4ff7e95 100644 (file)
@@ -91,9 +91,9 @@ static const struct iro iro_arr[47] = {
 /* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
        {  0x11aa0,     0x38,      0x0,      0x0,     0x18},
 /* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
-       {   0xa8c0,     0x30,      0x0,      0x0,     0x10},
+       {   0xa8c0,     0x38,      0x0,      0x0,     0x10},
 /* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
-       {   0x86f8,     0x28,      0x0,      0x0,     0x18},
+       {   0x86f8,     0x30,      0x0,      0x0,     0x18},
 /* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
        {  0x101f8,     0x10,      0x0,      0x0,     0x10},
 /* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
index adcb0f0..e641a77 100644 (file)
@@ -801,9 +801,6 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
 
        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
 
-       if (p_link->link_up)
-               ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);
-
        OSAL_LINK_UPDATE(p_hwfn);
 }
 
@@ -2267,7 +2264,7 @@ enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
                                               struct ecore_ptt *p_ptt)
 {
-       u32 drv_mb_param = 0, rsp, param;
+       u32 drv_mb_param, rsp, param;
        enum _ecore_status_t rc = ECORE_SUCCESS;
 
        drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
index b3736a8..23ebab7 100644 (file)
@@ -31,7 +31,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
 {
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
-       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       enum _ecore_status_t rc;
 
        if (!pp_ent)
                return ECORE_INVAL;
@@ -564,7 +564,7 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
-       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       enum _ecore_status_t rc;
 
        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
index c524cab..e371492 100644 (file)
@@ -924,6 +924,9 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
+       else
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                          "Got a completion without a callback function\n");
 
        if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
index eaad843..e8f1ebe 100644 (file)
@@ -3964,6 +3964,18 @@ bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
        return (p_vf->state == VF_ENABLED);
 }
 
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+                            u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return false;
+
+       return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
+}
+
 enum _ecore_status_t
 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
 {
index 3213070..d2ebce8 100644 (file)
 #define ETH_RX_CQE_PAGE_SIZE_BYTES          4096
 #define ETH_RX_NUM_NEXT_PAGE_BDS            2
 
+/* Limitation for Tunneled LSO Packets on the offset (in bytes) of the inner IP
+ * header (relevant to LSO for tunneled packet):
+ */
+/* Offset is limited to 253 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET          253
+/* Offset is limited to 251 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET          251
+
 #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT              1
 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET           18
 #define ETH_TX_MAX_BDS_PER_LSO_PACKET               255
@@ -141,16 +149,23 @@ struct eth_tx_1st_bd_flags {
 /* Do not allow additional VLAN manipulations on this packet. */
 #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
 #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-/* IP checksum recalculation in needed */
+/* Recalculate IP checksum. For tunneled packet - relevant to inner header. */
 #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK          0x1
 #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT         2
-/* TCP/UDP checksum recalculation in needed */
+/* Recalculate TCP/UDP checksum.
+ * For tunneled packet - relevant to inner header.
+ */
 #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK          0x1
 #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT         3
-/* If set, need to add the VLAN in vlan field to the packet. */
+/* If set, insert VLAN tag from vlan field to the packet.
+ * For tunneled packet - relevant to outer header.
+ */
 #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK   0x1
 #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT  4
-/* If set, this is an LSO packet. */
+/* If set, this is an LSO packet. Note: For Tunneled LSO packets, the offset of
+ * the inner IPV4 (and IPV6) header is limited to 253 (and 251 respectively)
+ * bytes, inclusive.
+ */
 #define ETH_TX_1ST_BD_FLAGS_LSO_MASK              0x1
 #define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT             5
 /* Recalculate Tunnel IP Checksum (if Tunnel IP Header is IPv4) */
@@ -165,7 +180,8 @@ struct eth_tx_1st_bd_flags {
  * The parsing information data for the first tx bd of a given packet.
  */
 struct eth_tx_data_1st_bd {
-       __le16 vlan /* VLAN tag to insert to packet (if needed). */;
+/* VLAN tag to insert to packet (if enabled by vlan_insertion flag). */
+       __le16 vlan;
 /* Number of BDs in packet. Should be at least 2 in non-LSO packet and at least
  * 3 in LSO (or Tunnel with IPv6+ext) packet.
  */
@@ -209,10 +225,14 @@ struct eth_tx_data_2nd_bd {
 /* For LSO / Tunnel header with IPv6+ext - Set if inner header is IPv6 */
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK           0x1
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT          11
-/* For LSO / Tunnel header with IPv6+ext - Set if outer header has IPv6+ext */
+/* In tunneling mode - Set to 1 when the Inner header is IPv6 with extension.
+ * Otherwise set to 1 if the header is IPv6 with extension.
+ */
 #define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                  0x1
 #define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                 12
-/* Set if Tunnel header has IPv6 ext. (3rd BD is required) */
+/* Set to 1 if Tunnel (outer = encapsulating) header has IPv6 ext. (Note: 3rd BD
+ * is required, hence EDPM does not support Tunnel [outer] header with Ipv6Ext)
+ */
 #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK             0x1
 #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT            13
 /* Set if (inner) L4 protocol is UDP. (Required when IPv6+ext (or tunnel with
index 4edffac..68abc2d 100644 (file)
@@ -13,7 +13,7 @@
  * Description: NVM config file - Generated file from nvm cfg excel.
  *              DO NOT MODIFY !!!
  *
- * Created:     5/9/2016
+ * Created:     9/6/2016
  *
  ****************************************************************************/
 
@@ -477,6 +477,9 @@ struct nvm_cfg1_glob {
                #define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
                #define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
                #define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+       /*  Max MSIX for Ethernet in default mode */
+               #define NVM_CFG1_GLOB_MAX_MSIX_MASK 0x03FC0000
+               #define NVM_CFG1_GLOB_MAX_MSIX_OFFSET 18
        u32 led_global_settings; /* 0x74 */
                #define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
                #define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
@@ -497,6 +500,14 @@ struct nvm_cfg1_glob {
                #define NVM_CFG1_GLOB_LANE2_SWAP_OFFSET 14
                #define NVM_CFG1_GLOB_LANE3_SWAP_MASK 0x00030000
                #define NVM_CFG1_GLOB_LANE3_SWAP_OFFSET 16
+       /*  Enable option 195 - Overriding the PCIe Preset value */
+               #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_MASK 0x00040000
+               #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_OFFSET 18
+               #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_DISABLED 0x0
+               #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_ENABLED 0x1
+       /*  PCIe Preset value - applies only if option 194 is enabled */
+               #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_MASK 0x00780000
+               #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_OFFSET 19
        u32 mbi_version; /* 0x7C */
                #define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
                #define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
@@ -623,6 +634,44 @@ struct nvm_cfg1_port {
                #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
                #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
                #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+       /* GPIO for HW reset the PHY. In case it is the same for all ports,
+        * need to set same value for all ports
+        */
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_MASK 0xFF000000
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_OFFSET 24
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_NA 0x0
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO0 0x1
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO1 0x2
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO2 0x3
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO3 0x4
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO4 0x5
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO5 0x6
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO6 0x7
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO7 0x8
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO8 0x9
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO9 0xA
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO10 0xB
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO11 0xC
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO12 0xD
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO13 0xE
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO14 0xF
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO15 0x10
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO16 0x11
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO17 0x12
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO18 0x13
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO19 0x14
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO20 0x15
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO21 0x16
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO22 0x17
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO23 0x18
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO24 0x19
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO25 0x1A
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO26 0x1B
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO27 0x1C
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO28 0x1D
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO29 0x1E
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO30 0x1F
+               #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO31 0x20
        u32 pcie_cfg; /* 0xC */
                #define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
                #define NVM_CFG1_PORT_RESERVED15_OFFSET 0
@@ -699,6 +748,7 @@ struct nvm_cfg1_port {
                #define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
                #define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
                #define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+               #define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
        u32 phy_cfg; /* 0x1C */
                #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
                #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
@@ -738,9 +788,16 @@ struct nvm_cfg1_port {
                #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
                #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
                #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
-               #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+               #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1
                #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
                #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+       /*  EEE power saving mode */
+               #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000
+               #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+               #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+               #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+               #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+               #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
        u32 mba_cfg1; /* 0x28 */
                #define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
                #define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
@@ -972,6 +1029,7 @@ struct nvm_cfg1_port {
                #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_NONE 0x0
                #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_FIRECODE 0x1
                #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_RS 0x2
+               #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_AUTO 0x7
        u32 mnm_25g_cap; /* 0x58 */
                #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_MASK \
                        0x0000FFFF
@@ -1049,6 +1107,7 @@ struct nvm_cfg1_port {
                #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_NONE 0x0
                #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_FIRECODE 0x1
                #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_RS 0x2
+               #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_AUTO 0x7
        u32 mnm_40g_cap; /* 0x64 */
                #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_MASK \
                        0x0000FFFF
@@ -1126,6 +1185,7 @@ struct nvm_cfg1_port {
                #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_NONE 0x0
                #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_FIRECODE 0x1
                #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_RS 0x2
+               #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_AUTO 0x7
        u32 mnm_50g_cap; /* 0x70 */
                #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_MASK \
                        0x0000FFFF
@@ -1205,6 +1265,7 @@ struct nvm_cfg1_port {
                #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_NONE 0x0
                #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_FIRECODE 0x1
                #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_RS 0x2
+               #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_AUTO 0x7
        u32 mnm_100g_cap; /* 0x7C */
                #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_MASK \
                        0x0000FFFF
@@ -1279,6 +1340,7 @@ struct nvm_cfg1_port {
                #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_NONE 0x0
                #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1
                #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2
+               #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_AUTO 0x7
        u32 reserved[116]; /* 0x88 */
 };
 
index 92ed404..8a4d68a 100644 (file)
@@ -21,7 +21,7 @@ static uint8_t npar_tx_switching = 1;
 char fw_file[PATH_MAX];
 
 const char *QEDE_DEFAULT_FIRMWARE =
-       "/lib/firmware/qed/qed_init_values-8.10.9.0.bin";
+       "/lib/firmware/qed/qed_init_values-8.14.6.0.bin";
 
 static void
 qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -234,8 +234,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
        if (IS_PF(edev)) {
                rc = qed_load_firmware_data(edev);
                if (rc) {
-                       DP_NOTICE(edev, true,
-                                 "Failed to find fw file %s\n", fw_file);
+                       DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
                        goto err;
                }
        }