diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 4607a80a9b..6c8e6d4072 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -19,16 +19,18 @@
 #include 
 #endif
 
+#include "ecore_status.h"
 #include "ecore_hsi_common.h"
 #include "ecore_hsi_debug_tools.h"
 #include "ecore_hsi_init_func.h"
 #include "ecore_hsi_init_tool.h"
+#include "ecore_hsi_func_common.h"
 #include "ecore_proto_if.h"
 #include "mcp_public.h"
 
 #define ECORE_MAJOR_VERSION		8
-#define ECORE_MINOR_VERSION		37
-#define ECORE_REVISION_VERSION		20
+#define ECORE_MINOR_VERSION		40
+#define ECORE_REVISION_VERSION		26
 #define ECORE_ENGINEERING_VERSION	0
 
 #define ECORE_VERSION							\
@@ -207,6 +209,7 @@ struct ecore_l2_info;
 struct ecore_igu_info;
 struct ecore_mcp_info;
 struct ecore_dcbx_info;
+struct ecore_llh_info;
 
 struct ecore_rt_data {
 	u32	*init_val;
@@ -420,8 +423,8 @@ struct ecore_hw_info {
 	u8 max_chains_per_vf;
 
 	u32 port_mode;
-	u32	hw_mode;
-	unsigned long device_capabilities;
+	u32 hw_mode;
+	u32 device_capabilities;
 
 	/* Default DCBX mode */
 	u8 dcbx_mode;
@@ -465,6 +468,8 @@ struct ecore_wfq_data {
 	bool configured;
 };
 
+#define OFLD_GRP_SIZE 4
+
 struct ecore_qm_info {
 	struct init_qm_pq_params	*qm_pq_params;
 	struct init_qm_vport_params	*qm_vport_params;
@@ -511,6 +516,8 @@ struct ecore_fw_data {
 	const u8 *modes_tree_buf;
 	union init_op *init_ops;
 	const u32 *arr_data;
+	const u32 *fw_overlays;
+	u32 fw_overlays_len;
 	u32 init_ops_size;
 };
 
@@ -543,6 +550,9 @@ enum ecore_mf_mode_bit {
 
 	/* Use stag for steering */
 	ECORE_MF_8021AD_TAGGING,
+
+	/* Allow FIP discovery fallback */
+	ECORE_MF_FIP_SPECIAL,
 };
 
 enum ecore_ufp_mode {
@@ -566,6 +576,12 @@ enum BAR_ID {
 	BAR_ID_1	/* Used for doorbells */
 };
 
+struct ecore_nvm_image_info {
+	u32 num_images;
+	struct bist_nvm_image_att *image_att;
+	bool valid;
+};
+
 struct ecore_hwfn {
 	struct ecore_dev		*p_dev;
 	u8				my_id;		/* ID inside the PF */
@@ -587,6 +603,7 @@ struct ecore_hwfn {
 
 	u8				num_funcs_on_engine;
 	u8				enabled_func_idx;
+	u8				num_funcs_on_port;
 
 	/* BAR access */
 	void OSAL_IOMEM			*regview;
@@ -661,6 +678,7 @@ struct ecore_hwfn {
 
 	struct dbg_tools_data		dbg_info;
 	void				*dbg_user_info;
+	struct virt_mem_desc		dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
 
 	struct z_stream_s		*stream;
 
@@ -689,8 +707,17 @@ struct ecore_hwfn {
 	 */
 	bool b_en_pacing;
 
+	/* Nvm images number and attributes */
+	struct ecore_nvm_image_info nvm_info;
+
+	struct phys_mem_desc *fw_overlay_mem;
+
 	/* @DPDK */
 	struct ecore_ptt		*p_arfs_ptt;
+
+	/* DPDK specific, not the part of vanilla ecore */
+	osal_spinlock_t spq_lock;
+	u32 iov_task_flags;
 };
 
 enum ecore_mf_mode {
@@ -700,26 +727,34 @@ enum ecore_mf_mode {
 	ECORE_MF_UFP,
 };
 
-/* @DPDK */
-struct ecore_dbg_feature {
-	u8				*dump_buf;
-	u32				buf_size;
-	u32				dumped_dwords;
+enum ecore_dev_type {
+	ECORE_DEV_TYPE_BB,
+	ECORE_DEV_TYPE_AH,
 };
 
-enum qed_dbg_features {
-	DBG_FEATURE_BUS,
+/* @DPDK */
+enum ecore_dbg_features {
 	DBG_FEATURE_GRC,
 	DBG_FEATURE_IDLE_CHK,
 	DBG_FEATURE_MCP_TRACE,
 	DBG_FEATURE_REG_FIFO,
+	DBG_FEATURE_IGU_FIFO,
 	DBG_FEATURE_PROTECTION_OVERRIDE,
+	DBG_FEATURE_FW_ASSERTS,
+	DBG_FEATURE_ILT,
 	DBG_FEATURE_NUM
 };
 
-enum ecore_dev_type {
-	ECORE_DEV_TYPE_BB,
-	ECORE_DEV_TYPE_AH,
+struct ecore_dbg_feature {
+	u8				*dump_buf;
+	u32				buf_size;
+	u32				dumped_dwords;
+};
+
+struct ecore_dbg_params {
+	struct ecore_dbg_feature features[DBG_FEATURE_NUM];
+	u8 engine_for_debug;
+	bool print_data;
 };
 
 struct ecore_dev {
@@ -795,7 +830,7 @@ struct ecore_dev {
 
 	u8				path_id;
 
-	unsigned long			mf_bits;
+	u32				mf_bits;
 	enum ecore_mf_mode		mf_mode;
 #define IS_MF_DEFAULT(_p_hwfn)	\
 	(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
@@ -828,14 +863,32 @@ struct ecore_dev {
 	u8				cache_shift;
 
 	/* Init */
-	const struct iro		*iro_arr;
-	#define IRO (p_hwfn->p_dev->iro_arr)
+	const u32			*iro_arr;
+#define IRO ((const struct iro *)p_hwfn->p_dev->iro_arr)
 
 	/* HW functions */
 	u8				num_hwfns;
 	struct ecore_hwfn		hwfns[MAX_HWFNS_PER_DEVICE];
+#define ECORE_LEADING_HWFN(dev)	(&dev->hwfns[0])
 #define ECORE_IS_CMT(dev)	((dev)->num_hwfns > 1)
 
+	/* Engine affinity */
+	u8				l2_affin_hint;
+	u8				fir_affin;
+	u8				iwarp_affin;
+	/* Macro for getting the engine-affinitized hwfn for FCoE/iSCSI/RoCE */
+#define ECORE_FIR_AFFIN_HWFN(dev)	(&dev->hwfns[dev->fir_affin])
+	/* Macro for getting the engine-affinitized hwfn for iWARP */
+#define ECORE_IWARP_AFFIN_HWFN(dev)	(&dev->hwfns[dev->iwarp_affin])
+	/* Generic macro for getting the engine-affinitized hwfn */
+#define ECORE_AFFIN_HWFN(dev) \
+	(ECORE_IS_IWARP_PERSONALITY(ECORE_LEADING_HWFN(dev)) ? \
+	 ECORE_IWARP_AFFIN_HWFN(dev) : \
+	 ECORE_FIR_AFFIN_HWFN(dev))
+	/* Macro for getting the index (0/1) of the engine-affinitized hwfn */
+#define ECORE_AFFIN_HWFN_IDX(dev) \
+	(IS_LEAD_HWFN(ECORE_AFFIN_HWFN(dev)) ? 0 : 1)
+
 	/* SRIOV */
 	struct ecore_hw_sriov_info	*p_iov_info;
 #define IS_ECORE_SRIOV(p_dev)		(!!(p_dev)->p_iov_info)
@@ -869,7 +922,11 @@ struct ecore_dev {
 
 #ifndef ASIC_ONLY
 	bool				b_is_emul_full;
+	bool				b_is_emul_mac;
 #endif
+	/* LLH info */
+	u8				ppfid_bitmap;
+	struct ecore_llh_info		*p_llh_info;
 
 	/* Indicates whether this PF serves a storage target */
 	bool				b_is_target;
@@ -878,22 +935,63 @@ struct ecore_dev {
 	void				*firmware;
 	u64				fw_len;
 #endif
+	bool				disable_ilt_dump;
 
 	/* @DPDK */
 	struct ecore_dbg_feature	dbg_features[DBG_FEATURE_NUM];
-	u8				engine_for_debug;
+	struct ecore_dbg_params		dbg_params;
+	osal_mutex_t			dbg_lock;
+
+	/* DPDK specific ecore field */
+	struct rte_pci_device		*pci_dev;
+};
+
+enum ecore_hsi_def_type {
+	ECORE_HSI_DEF_MAX_NUM_VFS,
+	ECORE_HSI_DEF_MAX_NUM_L2_QUEUES,
+	ECORE_HSI_DEF_MAX_NUM_PORTS,
+	ECORE_HSI_DEF_MAX_SB_PER_PATH,
+	ECORE_HSI_DEF_MAX_NUM_PFS,
+	ECORE_HSI_DEF_MAX_NUM_VPORTS,
+	ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE,
+	ECORE_HSI_DEF_MAX_QM_TX_QUEUES,
+	ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS,
+	ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+	ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS,
+	ECORE_HSI_DEF_MAX_PBF_CMD_LINES,
+	ECORE_HSI_DEF_MAX_BTB_BLOCKS,
+	ECORE_NUM_HSI_DEFS
 };
 
-#define NUM_OF_VFS(dev)		(ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
-				 : MAX_NUM_VFS_K2)
-#define NUM_OF_L2_QUEUES(dev)	(ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
-				 : MAX_NUM_L2_QUEUES_K2)
-#define NUM_OF_PORTS(dev)	(ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
-				 : MAX_NUM_PORTS_K2)
-#define NUM_OF_SBS(dev)		(ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
-				 : MAX_SB_PER_PATH_K2)
-#define NUM_OF_ENG_PFS(dev)	(ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
-				 : MAX_NUM_PFS_K2)
+u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev,
+			  enum ecore_hsi_def_type type);
+
+#define NUM_OF_VFS(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VFS)
+#define NUM_OF_L2_QUEUES(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_L2_QUEUES)
+#define NUM_OF_PORTS(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PORTS)
+#define NUM_OF_SBS(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_SB_PER_PATH)
+#define NUM_OF_ENG_PFS(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PFS)
+#define NUM_OF_VPORTS(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VPORTS)
+#define NUM_OF_RSS_ENGINES(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE)
+#define NUM_OF_QM_TX_QUEUES(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_TX_QUEUES)
+#define NUM_OF_PXP_ILT_RECORDS(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS)
+#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
+#define NUM_OF_QM_GLOBAL_RLS(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS)
+#define NUM_OF_PBF_CMD_LINES(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_PBF_CMD_LINES)
+#define NUM_OF_BTB_BLOCKS(dev) \
+	ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_BTB_BLOCKS)
 
 #define CRC8_TABLE_SIZE 256
 
@@ -921,7 +1019,6 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)
 }
 
 #define PKT_LB_TC 9
-#define MAX_NUM_VOQS_E4 20
 
 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
@@ -971,6 +1068,45 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
 
-#define ECORE_LEADING_HWFN(dev)	(&dev->hwfns[0])
+#define MFW_PORT(_p_hwfn)	((_p_hwfn)->abs_pf_id % \
+				 ecore_device_num_ports((_p_hwfn)->p_dev))
+
+/* The PFID<->PPFID calculation is based on the relative index of a PF on its
+ * port. In BB there is a bug in the LLH in which the PPFID is actually engine
+ * based, and thus it equals the PFID.
+ */
+#define ECORE_PFID_BY_PPFID(_p_hwfn, abs_ppfid) \
+	(ECORE_IS_BB((_p_hwfn)->p_dev) ? \
+	 (abs_ppfid) : \
+	 (abs_ppfid) * (_p_hwfn)->p_dev->num_ports_in_engine + \
+	 MFW_PORT(_p_hwfn))
+#define ECORE_PPFID_BY_PFID(_p_hwfn) \
+	(ECORE_IS_BB((_p_hwfn)->p_dev) ? \
+	 (_p_hwfn)->rel_pf_id : \
+	 (_p_hwfn)->rel_pf_id / (_p_hwfn)->p_dev->num_ports_in_engine)
+
+enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
+					 struct ecore_ptt *p_ptt, u32 addr,
+					 u32 val);
+
+/* Utility functions for dumping the content of the NIG LLH filters */
+enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid);
+enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_set_platform_str - Set the debug dump platform string.
+ *	Write the ecore version and device's string to the given buffer.
+ *
+ * @param p_hwfn
+ * @param buf_str
+ * @param buf_size
+ */
+void ecore_set_platform_str(struct ecore_hwfn *p_hwfn,
+			    char *buf_str, u32 buf_size);
+
+#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
+
+#define MSTORM_QZONE_START(dev) \
+	(TSTORM_QZONE_START + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
 
 #endif /* __ECORE_H */
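
The ECORE_PFID_BY_PPFID/ECORE_PPFID_BY_PFID macros added above encode a PF's relative index on its port: on AH, PPFID = rel_pf_id / num_ports_in_engine and PFID = PPFID * num_ports_in_engine + MFW_PORT(), while on BB the LLH quirk noted in the comment makes the PPFID simply equal the PFID. A minimal standalone sketch of the same arithmetic, with the device/hwfn structures and the port lookup stubbed out for illustration (fake_dev, fake_hwfn, the helper names and the sample values below are not part of the patch):

/* Illustrative sketch only: mirrors the ECORE_PFID_BY_PPFID /
 * ECORE_PPFID_BY_PFID arithmetic on stubbed structures.  The real macros
 * operate on struct ecore_hwfn / struct ecore_dev and ECORE_IS_BB().
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	bool is_bb;                        /* stands in for ECORE_IS_BB() */
	unsigned int num_ports_in_engine;
};

struct fake_hwfn {
	struct fake_dev *p_dev;
	unsigned int rel_pf_id;
	unsigned int mfw_port;             /* stands in for MFW_PORT() */
};

/* PPFID -> PFID: equal on BB; on AH the PPFID picks the PF "row" on the
 * port and MFW_PORT() picks the port itself.
 */
static unsigned int pfid_by_ppfid(const struct fake_hwfn *hwfn,
				  unsigned int abs_ppfid)
{
	if (hwfn->p_dev->is_bb)
		return abs_ppfid;

	return abs_ppfid * hwfn->p_dev->num_ports_in_engine + hwfn->mfw_port;
}

/* PFID -> PPFID: the integer division drops the port index again. */
static unsigned int ppfid_by_pfid(const struct fake_hwfn *hwfn)
{
	if (hwfn->p_dev->is_bb)
		return hwfn->rel_pf_id;

	return hwfn->rel_pf_id / hwfn->p_dev->num_ports_in_engine;
}

int main(void)
{
	struct fake_dev dev = { .is_bb = false, .num_ports_in_engine = 2 };
	struct fake_hwfn hwfn = { .p_dev = &dev, .rel_pf_id = 5, .mfw_port = 1 };
	unsigned int ppfid = ppfid_by_pfid(&hwfn);

	/* PF 5 on port 1 of a 2-port engine -> PPFID 2, and back to PFID 5 */
	printf("ppfid=%u pfid=%u\n", ppfid, pfid_by_ppfid(&hwfn, ppfid));
	return 0;
}

On BB both helpers collapse to the identity mapping, which is why the macros branch on ECORE_IS_BB() before doing any per-port arithmetic.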