static osal_spinlock_t qm_lock;
static u32 qm_lock_ref_cnt;
+#ifndef ASIC_ONLY
+static bool b_ptt_gtt_init;
+#endif
+
/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
* doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
return rc;
}
-struct ecore_llh_filter_e4_details {
+struct ecore_llh_filter_details {
u64 value;
u32 mode;
u32 protocol_type;
};
static enum _ecore_status_t
-ecore_llh_access_filter_e4(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
- struct ecore_llh_filter_e4_details *p_details,
- bool b_write_access)
+ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
+ struct ecore_llh_filter_details *p_details,
+ bool b_write_access)
{
u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
- struct ecore_dmae_params params;
+ struct dmae_params params;
enum _ecore_status_t rc;
u32 addr;
/* Filter enable - should be done first when removing a filter */
if (b_write_access && !p_details->enable) {
- addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
p_details->enable);
}
/* Filter value */
- addr = NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 2 * filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
OSAL_MEMSET(&params, 0, sizeof(params));
if (b_write_access) {
- params.flags = ECORE_DMAE_FLAG_PF_DST;
- params.dst_pfid = pfid;
+ SET_FIELD(params.flags, DMAE_PARAMS_DST_PF_VALID, 0x1);
+ params.dst_pf_id = pfid;
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&p_details->value,
addr, 2 /* size_in_dwords */, &params);
} else {
- params.flags = ECORE_DMAE_FLAG_PF_SRC |
- ECORE_DMAE_FLAG_COMPLETION_DST;
- params.src_pfid = pfid;
+ SET_FIELD(params.flags, DMAE_PARAMS_SRC_PF_VALID, 0x1);
+ SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);
+ params.src_pf_id = pfid;
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, addr,
(u64)(osal_uintptr_t)&p_details->value,
2 /* size_in_dwords */, &params);
return rc;
/* Filter mode */
- addr = NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;
if (b_write_access)
ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode);
else
addr);
/* Filter protocol type */
- addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;
if (b_write_access)
ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
p_details->protocol_type);
abs_ppfid, addr);
/* Filter header select */
- addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL_BB_K2 + filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4;
if (b_write_access)
ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
p_details->hdr_sel);
/* Filter enable - should be done last when adding a filter */
if (!b_write_access || p_details->enable) {
- addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
if (b_write_access)
ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
p_details->enable);
}
static enum _ecore_status_t
-ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,
u32 high, u32 low)
{
- struct ecore_llh_filter_e4_details filter_details;
+ struct ecore_llh_filter_details filter_details;
filter_details.enable = 1;
filter_details.value = ((u64)high << 32) | low;
1 : /* protocol-based classification */
0; /* MAC-address based classification */
- return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
- &filter_details,
- true /* write access */);
+ return ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details,
+ true /* write access */);
}
static enum _ecore_status_t
-ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,
+ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
{
- struct ecore_llh_filter_e4_details filter_details;
+ struct ecore_llh_filter_details filter_details;
OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
- return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
- &filter_details,
- true /* write access */);
-}
-
-static enum _ecore_status_t
-ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,
- u32 low)
-{
- return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,
- filter_idx, filter_prot_type,
- high, low);
-}
-
-static enum _ecore_status_t
-ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u8 abs_ppfid, u8 filter_idx)
-{
- return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,
- filter_idx);
+ return ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details,
+ true /* write access */);
}
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
filter_idx++) {
- rc = ecore_llh_remove_filter_e4(p_hwfn, p_ptt,
+ rc = ecore_llh_remove_filter(p_hwfn, p_ptt,
abs_ppfid, filter_idx);
if (rc != ECORE_SUCCESS)
goto out;
return ECORE_SUCCESS;
}
-static enum _ecore_status_t
-ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u8 ppfid)
+enum _ecore_status_t
+ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
{
- struct ecore_llh_filter_e4_details filter_details;
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ struct ecore_llh_filter_details filter_details;
u8 abs_ppfid, filter_idx;
u32 addr;
enum _ecore_status_t rc;
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
if (rc != ECORE_SUCCESS)
- return rc;
+ goto out;
addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
DP_NOTICE(p_hwfn, false,
for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
filter_idx++) {
OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
- rc = ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid,
- filter_idx, &filter_details,
- false /* read access */);
+ rc = ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, &filter_details,
+ false /* read access */);
if (rc != ECORE_SUCCESS)
- return rc;
+ goto out;
DP_NOTICE(p_hwfn, false,
"filter %2hhd: enable %d, value 0x%016lx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
filter_details.protocol_type, filter_details.hdr_sel);
}
- return ECORE_SUCCESS;
-}
-
-enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
-{
- struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
- enum _ecore_status_t rc;
-
- if (p_ptt == OSAL_NULL)
- return ECORE_AGAIN;
-
- rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);
+out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn);
ecore_dbg_user_data_free(p_hwfn);
+ ecore_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
/* @@@TBD Flush work-queue ? */
/* destroy doorbell recovery mechanism */
/* all vports participate in weighted fair queueing */
for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+ qm_info->qm_vport_params[i].wfq = 1;
}
/* initialize qm port params */
{
/* Initialize qm port parameters */
u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
for (i = 0; i < num_ports; i++) {
struct init_qm_port_params *p_qm_port =
&p_hwfn->qm_info.qm_port_params[i];
+ u16 pbf_max_cmd_lines;
p_qm_port->active = 1;
p_qm_port->active_phys_tcs = active_phys_tcs;
- p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports;
- p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(p_dev);
+ p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
+ p_qm_port->num_btb_blocks =
+ NUM_OF_BTB_BLOCKS(p_dev) / num_ports;
}
}
(pq_init_flags & PQ_INIT_PF_RL ||
pq_init_flags & PQ_INIT_VF_RL);
+ /* The "rl_id" is set as the "vport_id" */
+ qm_info->qm_pq_params[pq_idx].rl_id =
+ qm_info->qm_pq_params[pq_idx].vport_id;
+
/* qm params accounting */
qm_info->num_pqs++;
if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
/* vport table */
for (i = 0; i < qm_info->num_vports; i++) {
vport = &qm_info->qm_vport_params[i];
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "vport idx %d, vport_rl %d, wfq %d,"
- " first_tx_pq_id [ ",
- qm_info->start_vport + i, vport->vport_rl,
- vport->vport_wfq);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, wfq %d, first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->wfq);
for (tc = 0; tc < NUM_OF_TCS; tc++)
DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
vport->first_tx_pq_id[tc]);
/* pq table */
for (i = 0; i < qm_info->num_pqs; i++) {
pq = &qm_info->qm_pq_params[i];
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d, rl_id %d\n",
qm_info->start_pq + i, pq->port_id, pq->vport_id,
- pq->tc_id, pq->wrr_group, pq->rl_valid);
+ pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
}
}
"Failed to allocate dbg user info structure\n");
goto alloc_err;
}
+
+ rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate dbg user info structure\n");
+ goto alloc_err;
+ }
} /* hwfn loop */
rc = ecore_llh_alloc(p_dev);
{
int hw_mode = 0;
- if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
+ if (ECORE_IS_BB(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_BB;
} else if (ECORE_IS_AH(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_K2;
}
#ifndef ASIC_ONLY
-/* MFW-replacement initializations for non-ASIC */
-static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
+/* MFW-replacement initializations for emulation */
+static enum _ecore_status_t ecore_hw_init_chip(struct ecore_dev *p_dev,
struct ecore_ptt *p_ptt)
{
- struct ecore_dev *p_dev = p_hwfn->p_dev;
- u32 pl_hv = 1;
- int i;
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 pl_hv, wr_mbs;
+ int i, pos;
+ u16 ctrl = 0;
- if (CHIP_REV_IS_EMUL(p_dev)) {
- if (ECORE_IS_AH(p_dev))
- pl_hv |= 0x600;
+ if (!CHIP_REV_IS_EMUL(p_dev)) {
+ DP_NOTICE(p_dev, false,
+ "ecore_hw_init_chip() shouldn't be called in a non-emulation environment\n");
+ return ECORE_INVAL;
}
+ pl_hv = ECORE_IS_BB(p_dev) ? 0x1 : 0x401;
ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
- if (CHIP_REV_IS_EMUL(p_dev) &&
- (ECORE_IS_AH(p_dev)))
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
- 0x3ffffff);
+ if (ECORE_IS_AH(p_dev))
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2, 0x3ffffff);
- /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
- /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
- if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
+ /* Initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
+ if (ECORE_IS_BB(p_dev))
ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
- if (CHIP_REV_IS_EMUL(p_dev)) {
- if (ECORE_IS_AH(p_dev)) {
- /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
- (p_dev->num_ports_in_engine >> 1));
+ if (ECORE_IS_AH(p_dev)) {
+ /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+ p_dev->num_ports_in_engine >> 1);
- ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
- p_dev->num_ports_in_engine == 4 ? 0 : 3);
- }
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+ p_dev->num_ports_in_engine == 4 ? 0 : 3);
}
- /* Poll on RBC */
+ /* Signal the PSWRQ block to start initializing internal memories */
ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
for (i = 0; i < 100; i++) {
OSAL_UDELAY(50);
if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
break;
}
- if (i == 100)
+ if (i == 100) {
DP_NOTICE(p_hwfn, true,
"RBC done failed to complete in PSWRQ2\n");
+ return ECORE_TIMEOUT;
+ }
+
+ /* Indicate PSWRQ to initialize steering tag table with zeros */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RESET_STT, 1);
+ for (i = 0; i < 100; i++) {
+ OSAL_UDELAY(50);
+ if (!ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_RESET_STT))
+ break;
+ }
+ if (i == 100) {
+ DP_NOTICE(p_hwfn, true,
+ "Steering tag table initialization failed to complete in PSWRQ2\n");
+ return ECORE_TIMEOUT;
+ }
+
+ /* Clear a possible PSWRQ2 STT parity which might have been generated by
+ * a previous MSI-X read.
+ */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_PRTY_STS_WR_H_0, 0x8);
+
+ /* Configure PSWRQ2_REG_WR_MBS0 according to the MaxPayloadSize field in
+ * the PCI configuration space. The value is common for all PFs, so it
+ * is okay to do it according to the first loading PF.
+ */
+ pos = OSAL_PCI_FIND_CAPABILITY(p_dev, PCI_CAP_ID_EXP);
+ if (!pos) {
+ DP_NOTICE(p_dev, true,
+ "Failed to find the PCI Express Capability structure in the PCI config space\n");
+ return ECORE_IO;
+ }
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
+ wr_mbs = (ctrl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0, wr_mbs);
+
+ /* Configure the PGLUE_B to discard mode */
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_DISCARD_NBLOCK, 0x3f);
return ECORE_SUCCESS;
}
static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
{
u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
- int i, igu_sb_id;
+ u32 igu_sb_id;
+ int i;
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
ecore_init_cau_rt_data(p_dev);
/* Program GTT windows */
- ecore_gtt_init(p_hwfn, p_ptt);
+ ecore_gtt_init(p_hwfn);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_dev)) {
- rc = ecore_hw_init_chip(p_hwfn, p_ptt);
+ if (CHIP_REV_IS_EMUL(p_dev) && IS_LEAD_HWFN(p_hwfn)) {
+ rc = ecore_hw_init_chip(p_dev, p_ptt);
if (rc != ECORE_SUCCESS)
return rc;
}
qm_info->max_phys_tcs_per_port,
qm_info->pf_rl_en, qm_info->pf_wfq_en,
qm_info->vport_rl_en, qm_info->vport_wfq_en,
- qm_info->qm_port_params);
+ qm_info->qm_port_params,
+ OSAL_NULL /* global RLs are not configured */);
ecore_cxt_hw_init_common(p_hwfn);
/* Workaround clears ROCE search for all functions to prevent
* involving non initialized function in processing ROCE packet.
*/
- num_pfs = NUM_OF_ENG_PFS(p_dev);
+ num_pfs = (u16)NUM_OF_ENG_PFS(p_dev);
for (pf_id = 0; pf_id < num_pfs; pf_id++) {
ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
* This is not done inside the init tool since it currently can't
* perform a pretending to VFs.
*/
- max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+ max_num_vfs = (u8)NUM_OF_VFS(p_dev);
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
{
u8 loopback = 0, port = p_hwfn->port_id * 2;
- DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
-
/* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
port);
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
}
-static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
+static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ u32 mac_base, mac_config_val = 0xa853;
u8 port = p_hwfn->port_id;
- u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;
-
- DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
- (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2),
+ (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_SHIFT) |
(port <<
- CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
- (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));
+ CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_SHIFT) |
+ (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_SHIFT));
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
- 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);
+ mac_base = NWM_REG_MAC0_K2 + (port << 2) * NWM_REG_MAC0_SIZE;
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
- 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2,
+ 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_SHIFT);
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
- 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2,
+ 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_SHIFT);
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
- 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2,
+ 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_SHIFT);
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2,
+ 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2,
(0xA <<
- ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_SHIFT) |
(8 <<
- ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_SHIFT));
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
- 0xa853);
+ /* Strip the CRC field from the frame */
+ mac_config_val &= ~ETH_MAC_REG_COMMAND_CONFIG_CRC_FWD_K2;
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2,
+ mac_config_val);
}
static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (ECORE_IS_AH(p_hwfn->p_dev))
- ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
- else /* BB */
+ u8 port = ECORE_IS_BB(p_hwfn->p_dev) ? p_hwfn->port_id * 2
+ : p_hwfn->port_id;
+
+ DP_INFO(p_hwfn->p_dev, "Emulation: Configuring Link [port %02x]\n",
+ port);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev))
ecore_emul_link_init_bb(p_hwfn, p_ptt);
+ else
+ ecore_emul_link_init_ah(p_hwfn, p_ptt);
+
+ return;
}
static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
}
#endif
+static u32 ecore_hw_norm_region_conn(struct ecore_hwfn *p_hwfn)
+{
+ u32 norm_region_conn;
+
+ /* The order of CIDs allocation is according to the order of
+ * 'enum protocol_type'. Therefore, the number of CIDs for the normal
+ * region is calculated based on the CORE CIDs, in case of non-ETH
+ * personality, and otherwise - based on the ETH CIDs.
+ */
+ norm_region_conn =
+ ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ OSAL_NULL) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ OSAL_NULL);
+
+ return norm_region_conn;
+}
+
static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ u32 norm_region_conn, min_addr_reg1;
u32 pwm_regsize, norm_regsize;
- u32 non_pwm_conn, min_addr_reg1;
u32 db_bar_size, n_cpus;
u32 roce_edpm_mode;
u32 pf_dems_shift;
* connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
* in units of 4,096 bytes.
*/
- non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
- ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
- OSAL_NULL) +
- ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
- norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn,
+ norm_region_conn = ecore_hw_norm_region_conn(p_hwfn);
+ norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * norm_region_conn,
OSAL_PAGE_SIZE);
min_addr_reg1 = norm_regsize / 4096;
pwm_regsize = db_bar_size - norm_regsize;
struct ecore_ptt *p_ptt,
int hw_mode)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* In CMT the gate should be cleared by the 2nd hwfn */
- if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
+ if (!ECORE_IS_CMT(p_dev) || !IS_LEAD_HWFN(p_hwfn))
STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
- return ECORE_SUCCESS;
+ if (CHIP_REV_IS_FPGA(p_dev) && ECORE_IS_BB(p_dev))
+ ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
- if (ECORE_IS_AH(p_hwfn->p_dev))
- return ECORE_SUCCESS;
- else if (ECORE_IS_BB(p_hwfn->p_dev))
- ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
- } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (ECORE_IS_CMT(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_CMT(p_dev)) {
/* Activate OPTE in CMT */
u32 val;
0x55555555);
}
+ /* Set the TAGMAC default function on the port if needed.
+ * The ppfid should be set in the vector, except in BB which has
+ * a bug in the LLH where the ppfid is actually engine based.
+ */
+ if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_dev->mf_bits)) {
+ u8 pf_id = p_hwfn->rel_pf_id;
+
+ if (!ECORE_IS_BB(p_dev))
+ pf_id /= p_dev->num_ports_in_engine;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
+ }
+
ecore_emul_link_init(p_hwfn, p_ptt);
- } else {
- DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
}
#endif
- return rc;
+ return ECORE_SUCCESS;
}
static enum _ecore_status_t
if (rc)
return rc;
+ ecore_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
+
/* Pure runtime initializations - directly to the HW */
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
if (IS_LEAD_HWFN(p_hwfn)) {
rc = ecore_llh_hw_init_pf(p_hwfn, p_ptt,
p_params->avoid_eng_affin);
- if (rc)
+ if (rc != ECORE_SUCCESS)
return rc;
}
if (rc) {
DP_NOTICE(p_hwfn, true,
"Function start ramrod failed\n");
- } else {
return rc;
}
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
enum _ecore_status_t rc = ECORE_SUCCESS;
u16 ether_type;
int i;
goto load_err;
/* Clear the pglue_b was_error indication.
- * In E4 it must be done after the BME and the internal
- * FID_enable for the PF are set, since VDMs may cause the
- * indication to be set again.
+ * It must be done after the BME and the internal FID_enable for
+ * the PF are set, since VDMs may cause the indication to be set
+ * again.
*/
ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+ fw_overlays = p_dev->fw_data->fw_overlays;
+ fw_overlays_len = p_dev->fw_data->fw_overlays_len;
+ p_hwfn->fw_overlay_mem =
+ ecore_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
+ fw_overlays_len);
+ if (!p_hwfn->fw_overlay_mem) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate fw overlay memory\n");
+ goto load_err;
+ }
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
/* clear indirect access */
if (ECORE_IS_AH(p_hwfn->p_dev)) {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
} else {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
return ECORE_SUCCESS;
}
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
+
+static u32 ecore_hsi_def_val[][MAX_CHIP_IDS] = {
+ {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
+ {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
+ {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
+ {MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2, },
+ {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
+ {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
+ {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
+ {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
+ {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
+ {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
+ {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
+ {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
+ {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
+};
+
+u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev, enum ecore_hsi_def_type type)
+{
+ enum chip_ids chip_id = ECORE_IS_BB(p_dev) ? CHIP_BB : CHIP_K2;
+
+ if (type >= ECORE_NUM_HSI_DEFS) {
+ DP_ERR(p_dev, "Unexpected HSI definition type [%d]\n", type);
+ return 0;
+ }
+
+ return ecore_hsi_def_val[type][chip_id];
+}
+
static enum _ecore_status_t
ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
u32 resc_max_val, mcp_resp;
u8 res_id;
enum _ecore_status_t rc;
u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
- bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
switch (res_id) {
case ECORE_L2_QUEUE:
- *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
- MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_L2_QUEUES(p_dev) / num_funcs;
break;
case ECORE_VPORT:
- *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
- MAX_NUM_VPORTS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_VPORTS(p_dev) / num_funcs;
break;
case ECORE_RSS_ENG:
- *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
- ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RSS_ENGINES(p_dev) / num_funcs;
break;
case ECORE_PQ:
- *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
- MAX_QM_TX_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_QM_TX_QUEUES(p_dev) / num_funcs;
+ *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case ECORE_RL:
- *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = NUM_OF_QM_GLOBAL_RLS(p_dev) / num_funcs;
break;
case ECORE_MAC:
case ECORE_VLAN:
*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case ECORE_ILT:
- *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
- PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_PXP_ILT_RECORDS(p_dev) / num_funcs;
break;
case ECORE_LL2_QUEUE:
- *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
break;
case ECORE_RDMA_CNQ_RAM:
case ECORE_CMDQS_CQS:
*p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
break;
case ECORE_RDMA_STATS_QUEUE:
- /* @DPDK */
- *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
- MAX_NUM_VPORTS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(p_dev) / num_funcs;
break;
case ECORE_BDQ:
/* @DPDK */
/* 4-ports mode has limitations that should be enforced:
* - BB: the MFW can access only PPFIDs which their corresponding PFIDs
* belong to this certain port.
- * - AH/E5: only 4 PPFIDs per port are available.
+ * - AH: only 4 PPFIDs per port are available.
*/
if (ecore_device_num_ports(p_dev) == 4) {
u8 mask;
{
struct ecore_resc_unlock_params resc_unlock_params;
struct ecore_resc_lock_params resc_lock_params;
- bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 max_ilt_lines;
u8 res_id;
enum _ecore_status_t rc;
#ifndef ASIC_ONLY
}
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
/* Reduced build contains less PQs */
- if (!(p_hwfn->p_dev->b_is_emul_full)) {
+ if (!(p_dev->b_is_emul_full)) {
resc_num[ECORE_PQ] = 32;
resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
p_hwfn->enabled_func_idx;
/* For AH emulation, since we have a possible maximal number of
* 16 enabled PFs, in case there are not enough ILT lines -
- * allocate only first PF as RoCE and have all the other ETH
- * only with less ILT lines.
+ * allocate only first PF as RoCE and have all the other as
+ * ETH-only with less ILT lines.
+ * In case we increase the number of ILT lines for PF0, we need
+ * also to correct the start value for PF1-15.
*/
- if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
- resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
- resc_num[ECORE_ILT],
+ if (ECORE_IS_AH(p_dev) && p_dev->b_is_emul_full) {
+ if (!p_hwfn->rel_pf_id) {
+ resc_num[ECORE_ILT] =
+ OSAL_MAX_T(u32, resc_num[ECORE_ILT],
roce_min_ilt_lines);
+ } else if (resc_num[ECORE_ILT] < roce_min_ilt_lines) {
+ resc_start[ECORE_ILT] += roce_min_ilt_lines -
+ resc_num[ECORE_ILT];
+ }
+ }
}
-
- /* Correct the common ILT calculation if PF0 has more */
- if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
- p_hwfn->p_dev->b_is_emul_full &&
- p_hwfn->rel_pf_id && resc_num[ECORE_ILT] < roce_min_ilt_lines)
- resc_start[ECORE_ILT] += roce_min_ilt_lines -
- resc_num[ECORE_ILT];
#endif
/* Sanity for ILT */
- if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
- (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
+ max_ilt_lines = NUM_OF_PXP_ILT_RECORDS(p_dev);
+ if (RESC_END(p_hwfn, ECORE_ILT) > max_ilt_lines) {
DP_NOTICE(p_hwfn, true,
"Can't assign ILT pages [%08x,...,%08x]\n",
RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,
return rc;
}
+#ifndef ASIC_ONLY
+static enum _ecore_status_t
+ecore_emul_hw_get_nvm_info(struct ecore_hwfn *p_hwfn)
+{
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ /* The MF mode on emulation is either default or NPAR 1.0 */
+ p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST;
+ if (p_hwfn->num_funcs_on_port > 1)
+ p_dev->mf_bits |= 1 << ECORE_MF_INTER_PF_SWITCH |
+ 1 << ECORE_MF_DISABLE_ARFS;
+ else
+ p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
+ }
+
+ return ECORE_SUCCESS;
+}
+#endif
+
static enum _ecore_status_t
ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_link_params *link;
enum _ecore_status_t rc;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ return ecore_emul_hw_get_nvm_info(p_hwfn);
+#endif
+
/* Read global nvm_cfg address */
nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
-static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- struct ecore_dev *p_dev = p_hwfn->p_dev;
- u32 port_mode;
-
#ifndef ASIC_ONLY
- /* Read the port mode */
- if (CHIP_REV_IS_FPGA(p_dev))
- port_mode = 4;
- else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
- /* In CMT on emulation, assume 1 port */
- port_mode = 1;
- else
-#endif
- port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
-
- if (port_mode < 3) {
- p_dev->num_ports_in_engine = 1;
- } else if (port_mode <= 5) {
- p_dev->num_ports_in_engine = 2;
- } else {
- DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
- p_dev->num_ports_in_engine);
-
- /* Default num_ports_in_engine to something */
- p_dev->num_ports_in_engine = 1;
- }
-}
-
-static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
+static void ecore_emul_hw_info_port_num(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
- u32 port;
- int i;
+ u32 eco_reserved;
- p_dev->num_ports_in_engine = 0;
-
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_dev)) {
- port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
- switch ((port & 0xf000) >> 12) {
+ /* MISCS_REG_ECO_RESERVED[15:12]: num of ports in an engine */
+ eco_reserved = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
+ switch ((eco_reserved & 0xf000) >> 12) {
case 1:
p_dev->num_ports_in_engine = 1;
break;
break;
default:
DP_NOTICE(p_hwfn, false,
- "Unknown port mode in ECO_RESERVED %08x\n",
- port);
- }
- } else
-#endif
- for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
- port = ecore_rd(p_hwfn, p_ptt,
- CNIG_REG_NIG_PORT0_CONF_K2_E5 +
- (i * 4));
- if (port & 1)
- p_dev->num_ports_in_engine++;
+ "Emulation: Unknown port mode [ECO_RESERVED 0x%08x]\n",
+ eco_reserved);
+ p_dev->num_ports_in_engine = 1; /* Default to something */
+ break;
}
- if (!p_dev->num_ports_in_engine) {
- DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
-
- /* Default num_ports_in_engine to something */
- p_dev->num_ports_in_engine = 1;
- }
+ p_dev->num_ports = p_dev->num_ports_in_engine *
+ ecore_device_num_engines(p_dev);
}
+#endif
+/* Determine the number of ports of the device and per engine */
static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ u32 addr, global_offsize, global_addr, port_mode;
struct ecore_dev *p_dev = p_hwfn->p_dev;
- /* Determine the number of ports per engine */
- if (ECORE_IS_BB(p_dev))
- ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
- else
- ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_TEDIBEAR(p_dev)) {
+ p_dev->num_ports_in_engine = 1;
+ p_dev->num_ports = 2;
+ return;
+ }
+
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ ecore_emul_hw_info_port_num(p_hwfn, p_ptt);
+ return;
+ }
+#endif
- /* Get the total number of ports of the device */
- if (ECORE_IS_CMT(p_dev)) {
/* In CMT there is always only one port */
+ if (ECORE_IS_CMT(p_dev)) {
+ p_dev->num_ports_in_engine = 1;
p_dev->num_ports = 1;
-#ifndef ASIC_ONLY
- } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
- p_dev->num_ports = p_dev->num_ports_in_engine *
- ecore_device_num_engines(p_dev);
-#endif
- } else {
- u32 addr, global_offsize, global_addr;
+ return;
+ }
- addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_GLOBAL);
- global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
- global_addr = SECTION_ADDR(global_offsize, 0);
- addr = global_addr + OFFSETOF(struct public_global, max_ports);
- p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
+ /* Determine the number of ports per engine */
+ port_mode = ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE);
+ switch (port_mode) {
+ case 0x0:
+ p_dev->num_ports_in_engine = 1;
+ break;
+ case 0x1:
+ p_dev->num_ports_in_engine = 2;
+ break;
+ case 0x2:
+ p_dev->num_ports_in_engine = 4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Unknown port mode 0x%08x\n",
+ port_mode);
+ p_dev->num_ports_in_engine = 1; /* Default to something */
+ break;
}
+
+ /* Get the total number of ports of the device */
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, max_ports);
+ p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
}
static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
ecore_mcp_get_capabilities(p_hwfn, p_ptt);
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
-#endif
rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
if (rc != ECORE_SUCCESS)
return rc;
-#ifndef ASIC_ONLY
- }
-#endif
rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
protocol = p_hwfn->mcp_info->func_info.protocol;
p_hwfn->hw_info.personality = protocol;
}
-
#ifndef ASIC_ONLY
- /* To overcome ILT lack for emulation, until at least until we'll have
- * a definite answer from system about it, allow only PF0 to be RoCE.
+ else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ /* AH emulation:
+ * Allow only PF0 to be RoCE to overcome a lack of ILT lines.
*/
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
- if (!p_hwfn->rel_pf_id)
- p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
- else
+ if (ECORE_IS_AH(p_hwfn->p_dev) && p_hwfn->rel_pf_id)
p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+ else
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
}
#endif
return rc;
}
+#define ECORE_MAX_DEVICE_NAME_LEN (8)
+
+/* Compose a short device-name string (e.g. "BB A0", "AH B1") into 'name'.
+ * Writes at most min(max_chars, ECORE_MAX_DEVICE_NAME_LEN) bytes including
+ * the NUL terminator (OSAL_SNPRINTF semantics); longer output is truncated.
+ * The letter is derived from chip_rev ('A' + rev) and the digit from
+ * chip_metal.
+ */
+void ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars)
+{
+ u8 n;
+
+ n = OSAL_MIN_T(u8, max_chars, ECORE_MAX_DEVICE_NAME_LEN);
+ OSAL_SNPRINTF((char *)name, n, "%s %c%d",
+ ECORE_IS_BB(p_dev) ? "BB" : "AH",
+ 'A' + p_dev->chip_rev, (int)p_dev->chip_metal);
+}
+
static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
}
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_BB(p_dev)) {
/* For some reason we have problems with this register
- * in B0 emulation; Simply assume no CMT
+ * in BB B0 emulation; Simply assume no CMT
*/
DP_NOTICE(p_dev->hwfns, false,
"device on emul - assume no CMT\n");
if (CHIP_REV_IS_EMUL(p_dev)) {
tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
- if (tmp & (1 << 29)) {
- DP_NOTICE(p_hwfn, false,
- "Emulation: Running on a FULL build\n");
- p_dev->b_is_emul_full = true;
- } else {
+
+ /* MISCS_REG_ECO_RESERVED[29]: full/reduced emulation build */
+ p_dev->b_is_emul_full = !!(tmp & (1 << 29));
+
+ /* MISCS_REG_ECO_RESERVED[28]: emulation build w/ or w/o MAC */
+ p_dev->b_is_emul_mac = !!(tmp & (1 << 28));
+
DP_NOTICE(p_hwfn, false,
- "Emulation: Running on a REDUCED build\n");
- }
+ "Emulation: Running on a %s build %s MAC\n",
+ p_dev->b_is_emul_full ? "full" : "reduced",
+ p_dev->b_is_emul_mac ? "with" : "without");
}
#endif
p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
/* First hwfn learns basic information, e.g., number of hwfns */
- if (!p_hwfn->my_id) {
+ if (IS_LEAD_HWFN(p_hwfn)) {
rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
}
}
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && !b_ptt_gtt_init) {
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ u32 val;
+
+ /* Initialize PTT/GTT (done by MFW on ASIC) */
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_START_INIT_PTT_GTT, 1);
+ OSAL_MSLEEP(10);
+ ecore_ptt_invalidate(p_hwfn);
+ val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_INIT_DONE_PTT_GTT);
+ if (val != 1) {
+ DP_ERR(p_hwfn,
+ "PTT and GTT init in PGLUE_B didn't complete\n");
+ goto err1;
+ }
+
+ /* Clear a possible PGLUE_B parity from a previous GRC access */
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_PRTY_STS_WR_H_0, 0x380);
+
+ b_ptt_gtt_init = true;
+ }
+#endif
+
+ /* Store the precompiled init data ptrs */
+ if (IS_LEAD_HWFN(p_hwfn))
+ ecore_init_iro_array(p_hwfn->p_dev);
+
ecore_hw_hwfn_prepare(p_hwfn);
/* Initialize MCP structure */
/* Check if mdump logs/data are present and update the epoch value */
if (IS_LEAD_HWFN(p_hwfn)) {
-#ifndef ASIC_ONLY
- if (!CHIP_REV_IS_EMUL(p_dev)) {
-#endif
rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
&mdump_info);
if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
p_params->epoch);
-#ifndef ASIC_ONLY
- }
-#endif
}
/* Allocate the init RT array and initialize the init-ops engine */
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_dev)) {
- DP_NOTICE(p_hwfn, false,
- "FPGA: workaround; Prevent DMAE parities\n");
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
- 7);
+ if (ECORE_IS_AH(p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: workaround; Prevent DMAE parities\n");
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PCIE_REG_PRTY_MASK_K2, 7);
+ }
DP_NOTICE(p_hwfn, false,
"FPGA: workaround: Set VF bar0 size\n");
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
+ PGLUE_B_REG_VF_BAR0_SIZE_K2, 4);
}
#endif
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
- /* Store the precompiled init data ptrs */
- if (IS_PF(p_dev))
- ecore_init_iro_array(p_dev);
-
/* Initialize the first hwfn - will learn number of hwfns */
rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
p_dev->doorbells, p_dev->db_phys_addr,
p_params->personality = p_hwfn->hw_info.personality;
- /* initilalize 2nd hwfn if necessary */
+ /* Initialize 2nd hwfn if necessary */
if (ECORE_IS_CMT(p_dev)) {
void OSAL_IOMEM *p_regview, *p_doorbell;
u8 OSAL_IOMEM *addr;
/* Calculate final WFQ values for all vports and configure it.
* After this configuration each vport must have
- * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
+ * approx min rate = wfq * min_pf_rate / ECORE_WFQ_UNIT
*/
static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
- vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
+ vport_params[i].wfq = (wfq_speed * ECORE_WFQ_UNIT) /
min_pf_rate;
ecore_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
- vport_params[i].vport_wfq);
+ vport_params[i].wfq);
}
}
int i;
for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
- p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+ p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
}
static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
ecore_init_wfq_default_param(p_hwfn);
ecore_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
- vport_params[i].vport_wfq);
+ vport_params[i].wfq);
}
}
struct ecore_mcp_link_state *p_link;
int rc = ECORE_SUCCESS;
- p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
if (!p_link->min_pf_rate) {
p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;