/* Forward declaration */
struct ecore_dev;
struct ecore_hwfn;
+struct ecore_ptt;
struct ecore_vf_acquire_sw_info;
struct vf_pf_resc_request;
enum ecore_mcp_protocol_type;
union ecore_mcp_protocol_stats;
enum ecore_hw_err_type;
-void qed_link_update(struct ecore_hwfn *hwfn);
+void qed_link_update(struct ecore_hwfn *hwfn, struct ecore_ptt *ptt);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#undef __BIG_ENDIAN
#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
-#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
+#define OSAL_LINK_UPDATE(hwfn, ptt) qed_link_update(hwfn, ptt)
#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
/* SR-IOV channel */
}
}
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
struct ecore_qm_iids iids;
OSAL_MEM_ZERO(&iids, sizeof(iids));
ecore_cxt_qm_iids(p_hwfn, &iids);
- ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
+ ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
p_hwfn->first_on_engine,
iids.cids, iids.vf_cids, iids.tids,
ecore_cdu_init_common(p_hwfn);
}
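The theme of this patch is to stop dereferencing p_hwfn->p_main_ptt inside
ecore and instead thread an explicit PTT through the call chain. A minimal
sketch of the acquire/use/release discipline the reworked callers follow
(wrapper name hypothetical):

/* Illustrative only: acquire a PTT, pass it down explicitly, release it.
 * ecore_ptt_acquire() can fail when the PTT pool is exhausted, which is
 * why the reworked APIs report a status instead of assuming p_main_ptt.
 */
static enum _ecore_status_t example_qm_init(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt)
		return ECORE_AGAIN;

	ecore_qm_init_pf(p_hwfn, p_ptt);
	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}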
-void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
- ecore_qm_init_pf(p_hwfn);
+ ecore_qm_init_pf(p_hwfn, p_ptt);
ecore_cm_init_pf(p_hwfn);
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);
* @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
* @param p_hwfn
+ * @param p_ptt
*/
-void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
+ * @param p_ptt
*/
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief Reconfigures QM pf on the fly
BAR_ID_1 /* Used for doorbells */
};
-static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
+static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
return 1 << 17;
}
- val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ val = ecore_rd(p_hwfn, p_ptt, bar_reg);
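+	/* The MFW-programmed register encodes the BAR size as a power of
+	 * two, 1 << (val + 15) bytes: e.g. val == 1 -> 64kB, val == 4 -> 512kB.
+	 */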
if (val)
return 1 << (val + 15);
* In older MFW versions they are set to 0 which means disabled.
*/
if (p_hwfn->p_dev->num_hwfns > 1) {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size of 256kB"
- " for GRC and 512kB for DB\n");
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
-		val = BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+		val = (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
} else {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size of 512kB"
- " for GRC and 512kB for DB\n");
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
val = 512 * 1024;
}
ecore_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- ecore_qm_init_pf(p_hwfn);
+ ecore_qm_init_pf(p_hwfn, p_ptt);
/* activate init tool on runtime array */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
ecore_l2_setup(p_hwfn);
- ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_iov_setup(p_hwfn);
}
}
ecore_init_cau_rt_data(p_dev);
/* Program GTT windows */
- ecore_gtt_init(p_hwfn);
+ ecore_gtt_init(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev)) {
- rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+ rc = ecore_hw_init_chip(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS)
return rc;
}
enum _ecore_status_t rc = ECORE_SUCCESS;
u8 cond;
- db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
+ db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
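+	/* On CMT (dual-hwfn) devices the doorbell BAR is split between the
+	 * two engines, so each PF uses half of it.
+	 */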
if (p_hwfn->p_dev->num_hwfns > 1)
db_bar_size /= 2;
/* Update the rate limit once we actually have a link */
p_hwfn->qm_info.pf_rl = 100000;
}
- ecore_cxt_hw_init_pf(p_hwfn);
+ ecore_cxt_hw_init_pf(p_hwfn, p_ptt);
ecore_int_igu_init_rt(p_hwfn);
return rc;
/* send function start command */
- rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
+ rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn,
+ p_hwfn->p_dev->mf_mode,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, true,
return rc2;
}
-void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
+enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
{
int j;
for_each_hwfn(p_dev, j) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_ptt;
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
continue;
}
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
"Shutting down the fastpath\n");
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
+ ecore_ptt_release(p_hwfn, p_ptt);
}
+
+ return ECORE_SUCCESS;
}
-void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
{
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_ptt;
if (IS_VF(p_hwfn->p_dev))
- return;
+ return ECORE_SUCCESS;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
/* If roce info is allocated it means roce is initialized and should
* be enabled in searcher.
}
/* Re-open incoming traffic */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
}
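Both fastpath toggles can now fail when no PTT entry is free, so callers are
expected to propagate the status. A minimal caller sketch (helper name
hypothetical):

static enum _ecore_status_t example_restart_fastpath(struct ecore_dev *p_dev)
{
	enum _ecore_status_t rc;
	int i;

	/* ECORE_AGAIN from either helper means the PTT pool was exhausted */
	rc = ecore_hw_stop_fastpath(p_dev);
	if (rc != ECORE_SUCCESS)
		return rc;

	for_each_hwfn(p_dev, i) {
		rc = ecore_hw_start_fastpath(&p_dev->hwfns[i]);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return ECORE_SUCCESS;
}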
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static enum _ecore_status_t
__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id, u32 resc_max_val,
+ struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id,
+ u32 resc_max_val,
u32 *p_mcp_resp)
{
enum _ecore_status_t rc;
- rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
resc_max_val, p_mcp_resp);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
}
static enum _ecore_status_t
-ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
+ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
u32 resc_max_val, mcp_resp;
continue;
}
- rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
+ rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
resc_max_val, &mcp_resp);
if (rc != ECORE_SUCCESS)
return rc;
#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
bool drv_resc_alloc)
{
struct ecore_resc_unlock_params resc_unlock_params;
OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
- rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
+ rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
return rc;
} else if (rc == ECORE_NOTIMPL) {
rc = ECORE_BUSY;
goto unlock_and_exit;
} else {
- rc = ecore_hw_set_soft_resc_size(p_hwfn);
+ rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
DP_NOTICE(p_hwfn, false,
"Failed to set the max values of the soft resources\n");
} else if (rc == ECORE_NOTIMPL) {
DP_INFO(p_hwfn,
"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
- rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
goto unlock_and_exit;
if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
- rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
}
/* This will also learn the number of SBs from MFW */
- if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt))
+ if (ecore_int_igu_reset_cam(p_hwfn, p_ptt))
return ECORE_INVAL;
ecore_hw_set_feat(p_hwfn);
return ECORE_SUCCESS;
unlock_and_exit:
- ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, &resc_unlock_params);
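+	/* Notify the MFW only if the lock was actually acquired and has
+	 * not already been released.
+	 */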
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ ecore_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
return rc;
}
* the resources/features depend on them.
* This order is harmless when not forcing.
*/
- rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
+ rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
rc = ECORE_SUCCESS;
p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
return rc;
}
-static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
+static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
- struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u16 device_id_mask;
u32 tmp;
return ECORE_ABORTED;
}
- p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_NUM);
- p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_REV);
+ p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_CHIP_NUM);
+ p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_CHIP_REV);
MASK_FIELD(CHIP_REV, p_dev->chip_rev);
/* Learn number of HW-functions */
- tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CMT_ENABLED_FOR_PAIR);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
}
#endif
- p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ p_dev->chip_bond_id = ecore_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_TEST_REG) >> 4;
MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
- p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
DP_INFO(p_dev->hwfns,
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
if (CHIP_REV_IS_EMUL(p_dev)) {
- tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_ECO_RESERVED);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
if (tmp & (1 << 29)) {
DP_NOTICE(p_hwfn, false,
"Emulation: Running on a FULL build\n");
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
- rc = ecore_get_dev_info(p_dev);
+ rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res =
/* adjust bar offset for second engine */
addr = (u8 OSAL_IOMEM *)p_dev->regview +
- ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
p_regview = (void OSAL_IOMEM *)addr;
addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
- ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
p_doorbell = (void OSAL_IOMEM *)addr;
/* prepare second hw function */
*
* @param p_dev
*
+ * @return enum _ecore_status_t
*/
-void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
+enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
#ifndef LINUX_REMOVE
/**
* @brief ecore_hw_start_fastpath - restart fastpath traffic,
* only if hw_stop_fastpath was called
- * @param p_dev
+ * @param p_hwfn
*
+ * @return enum _ecore_status_t
*/
-void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
enum ecore_hw_prepare_result {
ECORE_HW_PREPARE_SUCCESS,
* @brief ecore_gtt_init - Initialize GTT windows
*
* @param p_hwfn
+* @param p_ptt
*/
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
return rc;
}
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 gtt_base;
u32 i;
/* initialize PTT/GTT (poll for completion) */
if (!initialized) {
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_START_INIT_PTT_GTT, 1);
initialized = true;
}
/* ptt might be overridden by HW until this is done */
OSAL_UDELAY(10);
ecore_ptt_invalidate(p_hwfn);
- val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ val = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INIT_DONE_PTT_GTT);
} while ((val != 1) && --poll_cnt);
*
* @param p_hwfn
+* @param p_ptt
*/
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
#endif /* __ECORE_INIT_OPS__ */
tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
- DP_INFO(p_hwfn->p_dev,
- "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
- " [PF: %02x %s %02x]\n",
- tmp2, tmp,
- (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
- (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
- grc_timeout_attn_master_to_str((tmp &
- ECORE_GRC_ATTENTION_MASTER_MASK) >>
- ECORE_GRC_ATTENTION_MASTER_SHIFT),
- (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
- (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
+ : "Read from",
+ (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
+ grc_timeout_attn_master_to_str(
+ (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
+ ECORE_GRC_ATTENTION_MASTER_SHIFT),
+ (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
+ (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
- ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
- (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
- ECORE_GRC_ATTENTION_VF_SHIFT);
+ ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
+ (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
+ ECORE_GRC_ATTENTION_VF_SHIFT);
out:
/* Regardless of anything else, clean the validity bit */
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
- OSAL_LINK_UPDATE(p_hwfn);
+ OSAL_LINK_UPDATE(p_hwfn, p_ptt);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
u32 *p_media_type)
{
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
- struct ecore_ptt *p_ptt;
/* TODO - Add support for VFs */
- if (IS_VF(p_dev))
+ if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
return ECORE_BUSY;
}
- *p_media_type = MEDIA_UNSPECIFIED;
-
- p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return ECORE_BUSY;
-
- *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
- OFFSETOF(struct public_port, media_type));
-
- ecore_ptt_release(p_hwfn, p_ptt);
+	if (!p_ptt) {
+		*p_media_type = MEDIA_UNSPECIFIED;
+		return ECORE_INVAL;
+	}
+
+	*p_media_type = ecore_rd(p_hwfn, p_ptt,
+				 p_hwfn->mcp_info->port_addr +
+				 OFFSETOF(struct public_port, media_type));
return ECORE_SUCCESS;
}
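Since the PTT acquisition moved out of ecore_mcp_get_media_type(), a PF
caller now owns it. A minimal sketch (helper name hypothetical):

static enum _ecore_status_t example_media_type(struct ecore_hwfn *p_hwfn,
					       u32 *p_media)
{
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	enum _ecore_status_t rc;

	if (!p_ptt)
		return ECORE_AGAIN;

	/* VFs and a NULL PTT are rejected by the function itself */
	rc = ecore_mcp_get_media_type(p_hwfn, p_ptt, p_media);
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}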
* @brief Get media type value of the port.
*
- * @param p_dev - ecore dev pointer
+ * @param p_hwfn
+ * @param p_ptt
- * @param mfw_ver - media type value
+ * @param media_type - media type value
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
-enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
- u32 *media_type);
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *media_type);
/**
* @brief - Sends a command to the MCP mailbox.
* for a physical function (PF).
*
* @param p_hwfn
+ * @param p_ptt
* @param p_tunn - pf update tunneling parameters
* @param comp_mode - completion mode
* @param p_comp_data - callback function
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
}
static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn)
{
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
}
if (p_tunn->vxlan_port.b_update_port)
- ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
p_tunn->vxlan_port.port);
if (p_tunn->geneve_port.b_update_port)
- ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_set_geneve_dest_port(p_hwfn, p_ptt,
p_tunn->geneve_port.port);
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
+ ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
static void
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if (p_tunn)
- ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
+ ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+ &p_hwfn->p_dev->tunnel);
return rc;
}
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
if (rc != ECORE_SUCCESS)
return rc;
- ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
+ ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);
return rc;
}
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_ptt
* @param p_tunn - pf start tunneling configuration
* @param mode
* @param allow_npar_tx_switch - npar tx switching to be used
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
u8 *p_fw_ret, bool skip_quick_poll)
{
struct ecore_spq_comp_done *comp_done;
+ struct ecore_ptt *p_ptt;
enum _ecore_status_t rc;
/* A relatively short polling period w/o sleeping, to allow the FW to
if (rc == ECORE_SUCCESS)
return ECORE_SUCCESS;
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
- rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ rc = ecore_mcp_drain(p_hwfn, p_ptt);
+ ecore_ptt_release(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
goto err;
return ecore_iov_allocate_vfdb(p_hwfn);
}
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return;
if (b_update_required) {
u16 geneve_port;
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
if (rc != ECORE_SUCCESS)
* @brief ecore_iov_setup - setup sriov related resources
*
* @param p_hwfn
- * @param p_ptt
*/
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_iov_free - free sriov related resources
QEDE_VXLAN_DEF_PORT;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ struct ecore_ptt *p_ptt = IS_PF(edev) ?
+ ecore_ptt_acquire(p_hwfn) : NULL;
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u\n",
tunn.vxlan_port.port);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
+		/* Release the PTT on the success path as well */
+		if (IS_PF(edev))
+			ecore_ptt_release(p_hwfn, p_ptt);
}
qede_set_cmn_tunn_param(&tunn, clss, true, true);
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
+ struct ecore_ptt *p_ptt = IS_PF(edev) ?
+ ecore_ptt_acquire(p_hwfn) : NULL;
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
&tunn, ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Failed to update tunn_clss %u\n",
tunn.vxlan.tun_cls);
}
+		/* Release the PTT once per iteration, on both paths */
+		if (IS_PF(edev))
+			ecore_ptt_release(p_hwfn, p_ptt);
}
qdev->num_tunn_filters++; /* Filter added successfully */
qede_set_cmn_tunn_param(&tunn, clss, false, true);
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
- ECORE_SPQ_MODE_CB, NULL);
+ struct ecore_ptt *p_ptt = IS_PF(edev) ?
+ ecore_ptt_acquire(p_hwfn) : NULL;
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
+ &tunn, ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev,
"Failed to update tunn_clss %u\n",
tunn.vxlan.tun_cls);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn,
+ p_ptt);
break;
}
+		/* Release the PTT on the success path as well */
+		if (IS_PF(edev))
+			ecore_ptt_release(p_hwfn, p_ptt);
}
rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN);
/* Always update link configuration according to bulletin */
- qed_link_update(hwfn);
+ qed_link_update(hwfn, NULL);
}
static void qede_vf_task(void *arg)
}
static void qed_fill_link(struct ecore_hwfn *hwfn,
+ __rte_unused struct ecore_ptt *ptt,
struct qed_link_output *if_link)
{
struct ecore_mcp_link_params params;
static void
qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
{
- qed_fill_link(&edev->hwfns[0], if_link);
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
-#ifdef CONFIG_QED_SRIOV
- for_each_hwfn(cdev, i)
- qed_inform_vf_link_state(&cdev->hwfns[i]);
-#endif
+ hwfn = &edev->hwfns[0];
+ if (IS_PF(edev)) {
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n");
+
+ qed_fill_link(hwfn, ptt, if_link);
+
+ if (ptt)
+ ecore_ptt_release(hwfn, ptt);
+ } else {
+ qed_fill_link(hwfn, NULL, if_link);
+ }
}
static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
return rc;
}
-void qed_link_update(struct ecore_hwfn *hwfn)
+void qed_link_update(struct ecore_hwfn *hwfn, struct ecore_ptt *ptt)
{
struct qed_link_output if_link;
- qed_fill_link(hwfn, &if_link);
+ qed_fill_link(hwfn, ptt, &if_link);
}
static int qed_drain(struct ecore_dev *edev)