#define OSAL_PAGE_SIZE 4096
#define OSAL_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
#define OSAL_IOMEM volatile
+#define OSAL_UNUSED __attribute__((unused))
#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0)
#define OSAL_MIN_T(type, __min1, __min2) \
((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
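A minimal sketch (not part of the patch) of how the new OSAL_UNUSED annotation is intended to be applied: it marks parameters that are kept only for interface compatibility so that builds with -Wunused-parameter (or -Werror) stay clean. The callback name and body below are illustrative assumptions, not driver code.

static void example_unused_param_cb(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt OSAL_UNUSED * p_ptt)
{
	/* p_ptt is deliberately untouched; OSAL_UNUSED silences the warning */
	DP_NOTICE(p_hwfn, true, "callback invoked without using p_ptt\n");
}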
*
* @return OSAL_INLINE u8
*/
-static OSAL_INLINE u8
-ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
+static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)
{
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
u32 per_vf_tids;
};
-static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
- struct ecore_tm_iids *iids)
+static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_tm_iids *iids)
{
bool tm_vf_required = false;
bool tm_required = false;
p_blk = &p_cli->pf_blks[0];
ecore_cxt_qm_iids(p_hwfn, &qm_iids);
- total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ total = ecore_qm_pf_mem_size(qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
- p_hwfn->first_on_engine,
iids.cids, iids.vf_cids, iids.tids,
qm_info->start_pq,
qm_info->num_pqs - qm_info->num_vf_pqs,
static void
ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct dcbx_features *p_feat;
static void
ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct dcbx_features *p_feat;
static enum _ecore_status_t
ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_operational_params *p_operational;
return ECORE_SUCCESS;
}
-static void
-ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
{
struct ecore_dcbx_dscp_params *p_dscp;
struct dcb_dscp_map *p_dscp_map;
}
}
-static void
-ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
{
struct lldp_config_params_s *p_local;
OSAL_ARRAY_SIZE(p_local->local_port_id));
}
-static void
-ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
{
struct lldp_status_params_s *p_remote;
}
static enum _ecore_status_t
-ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *p_params,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
switch (type) {
case ECORE_DCBX_REMOTE_MIB:
- ecore_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_remote_params(p_hwfn, p_params);
break;
case ECORE_DCBX_LOCAL_MIB:
- ecore_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_local_params(p_hwfn, p_params);
break;
case ECORE_DCBX_OPERATIONAL_MIB:
- ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_operational_params(p_hwfn, p_params);
break;
case ECORE_DCBX_REMOTE_LLDP_MIB:
- ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_remote_lldp_params(p_hwfn, p_params);
break;
case ECORE_DCBX_LOCAL_LLDP_MIB:
- ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_local_lldp_params(p_hwfn, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
return ECORE_INVAL;
}
- return rc;
+ return ECORE_SUCCESS;
}
static enum _ecore_status_t
return rc;
if (type == ECORE_DCBX_OPERATIONAL_MIB) {
- ecore_dcbx_get_dscp_params(p_hwfn, p_ptt,
- &p_hwfn->p_dcbx_info->get);
+ ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get);
rc = ecore_dcbx_process_mib_info(p_hwfn);
if (!rc) {
enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
}
}
- ecore_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+
+ ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
/* Update the DSCP to TC mapping bit if required */
if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
if (rc != ECORE_SUCCESS)
goto out;
- rc = ecore_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+ rc = ecore_dcbx_get_params(p_hwfn, p_get, type);
out:
ecore_ptt_release(p_hwfn, p_ptt);
* Old drivers that don't acquire the lock can run in parallel, and
* their allocation values won't be affected by the updated max values.
*/
- ecore_mcp_resc_lock_default_init(p_hwfn, &resc_lock_params,
- &resc_unlock_params,
+ ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
ECORE_RESC_LOCK_RESC_ALLOC, false);
rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
}
}
-static void
-ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
+static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
{
int i;
}
static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 min_pf_rate)
+ struct ecore_ptt *p_ptt)
{
struct init_qm_vport_params *vport_params;
int i;
vport_params = p_hwfn->qm_info.qm_vport_params;
for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
- ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
+ ecore_init_wfq_default_param(p_hwfn);
ecore_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
vport_params[i].vport_wfq);
if (rc == ECORE_SUCCESS && use_wfq)
ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
else
- ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
return rc;
}
p_link = &p_hwfn->mcp_info->link_output;
if (p_link->min_pf_rate)
- ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
- p_link->min_pf_rate);
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) *
OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
{
u32 prev_hw_addr;
- prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+ prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
if (new_hw_addr == prev_hw_addr)
return;
static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 hw_addr)
{
- u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
u32 offset;
offset = hw_addr - win_hw_addr;
/**
* @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
*
- * @param p_hwfn
* @param p_ptt
*
* @return u32
*/
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
- bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
- u8 port_id,
u8 pf_id,
u32 num_pf_cids,
- u32 num_tids, u32 base_mem_addr_4kb)
+ u32 num_tids,
+ u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
u16 i, pq_id, pq_group;
/******************** INTERFACE IMPLEMENTATION *********************/
-u32 ecore_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
- u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
- bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
- ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
- num_tids, 0);
+ ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
#endif
/* Map Tx PQs */
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
- max_phys_tcs_per_port, is_first_pf, num_pf_cids,
- num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
- start_vport, other_mem_size_4kb, pq_params,
- vport_params);
+ max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
+ start_pq, num_pf_pqs, num_vf_pqs, start_vport,
+ other_mem_size_4kb, pq_params, vport_params);
/* Init PF WFQ */
if (pf_wfq)
}
}
-/* In MF should be called once per engine to set EtherType of OuterTag */
-void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType)
-{
- /* Update PRS register */
- STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-
- /* Update NIG register */
- STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-
- /* Update PBF register */
- STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-}
-
/* In MF should be called once per port to set EtherType of OuterTag */
-void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType)
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
/* Update DORQ register */
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
/* Calculate and return CDU validation byte per connection type / region /
* cid
*/
-static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
- u8 conn_type,
- u8 region, u32 cid)
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
}
/* Calculate and set validation bytes for session context */
-void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid)
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
- *t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
- *u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
+ *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
-void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid)
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
+ u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type,
- 1, tid);
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
*
* @return The required host memory size in 4KB units.
*/
-u32 ecore_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs);
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs);
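A hedged usage sketch of the reduced ecore_qm_pf_mem_size() signature, which now needs only the connection/task/PQ counts (the values below are placeholders, not taken from the patch):

	/* Placeholder counts; real callers pass the qm_iids/qm_info values. */
	u32 num_pf_cids = 64, num_vf_cids = 32, num_tids = 16;
	u16 num_pf_pqs = 8, num_vf_pqs = 4;
	u32 qm_mem_4kb;

	qm_mem_4kb = ecore_qm_pf_mem_size(num_pf_cids, num_vf_cids, num_tids,
					  num_pf_pqs, num_vf_pqs);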
/**
* @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
* @param port_id - port ID
* @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param is_first_pf - 1 = first PF in engine, 0 = othwerwise
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
* @return 0 on success, -1 on error.
*/
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 port_id,
- u8 pf_id,
- u8 max_phys_tcs_per_port,
- bool is_first_pf,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 start_pq,
- u16 num_pf_pqs,
- u16 num_vf_pqs,
- u8 start_vport,
- u8 num_vports,
- u16 pf_wfq,
- u32 pf_rl,
- struct init_qm_pq_params *pq_params,
- struct init_qm_vport_params *vport_params);
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params);
/**
* @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC
-/**
- * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf and llh
- * ethType Regs to input ethType
- * should Be called once per engine
- * if engine
- * is in BD mode.
- *
- * @param p_ptt - ptt window used for writing the registers.
- * @param ethType - etherType to configure
- */
-void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType);
-
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
 * input ethType. Should be called
* once per port.
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_hwfn - HW device data
* @param ethType - etherType to configure
*/
-void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType);
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType);
#endif /* UNUSED_HSI_FUNC */
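An illustrative call of the per-port variant after the p_ptt removal; the 0x88a8 (802.1ad S-tag) EtherType is only an example value, not something this patch prescribes:

	/* In MF mode, program the OuterTag EtherType once per port. */
	ecore_set_port_mf_ovlan_eth_type(p_hwfn, 0x88a8);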
/**
* @param ctx_type - context type.
* @param cid - context cid.
*/
-void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem,
- u16 ctx_size,
- u8 ctx_type,
- u32 cid);
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid);
+
/**
 * @brief ecore_calc_task_ctx_validation - Calculate validation byte for task
* context.
*
- * @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param tid - context tid.
*/
-void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem,
- u16 ctx_size,
- u8 ctx_type,
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
u32 tid);
/**
* @brief ecore_memset_session_ctx - Memset session context to 0 while
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 addr, u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
case INIT_SRC_ZEROS:
data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
- rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+ rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
else
ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
OSAL_LE32_TO_CPU(cmd->op_data));
}
-/* init_ops callbacks entry point */
+/* init_ops callbacks entry point.
+ * OSAL_UNUSED is temporarily used to avoid unused-parameter compilation
+ * warnings. It should be removed when the function is actually used.
+ */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_callback_op *p_cmd)
+ struct ecore_ptt OSAL_UNUSED * p_ptt,
+ struct init_callback_op OSAL_UNUSED * p_cmd)
{
DP_NOTICE(p_hwfn, true,
"Currently init values have no need of callbacks\n");
INIT_IF_MODE_OP_CMD_OFFSET);
}
-static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
- struct init_if_phase_op *p_cmd,
+static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
modes);
break;
case INIT_OP_IF_PHASE:
- cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
- phase, phase_id);
+ cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
+ phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
}
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
- const u8 *data)
+#ifdef CONFIG_ECORE_BINARY_FW
+ const u8 *fw_data)
+#else
+ const u8 OSAL_UNUSED * fw_data)
+#endif
{
struct ecore_fw_data *fw = p_dev->fw_data;
struct bin_buffer_hdr *buf_hdr;
u32 offset, len;
- if (!data) {
+ if (!fw_data) {
DP_NOTICE(p_dev, true, "Invalid fw data\n");
return ECORE_INVAL;
}
- buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
+ buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
- fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
+ fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
- fw->init_ops = (union init_op *)((uintptr_t)(data + offset));
+ fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));
offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
- fw->arr_data = (u32 *)((uintptr_t)(data + offset));
+ fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));
offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
- fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
+ fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
- p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
- p_params->concrete_fid);
+ p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
}
static void
-ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
- struct vport_update_ramrod_data *p_ramrod,
+ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
struct ecore_sge_tpa_params *p_params)
{
struct eth_vport_tpa_param *p_tpa;
}
static void
-ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
- struct vport_update_ramrod_data *p_ramrod,
+ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
struct ecore_sp_vport_update_params *p_params)
{
int i;
}
/* Update mcast bins for VFs, PF doesn't use this functionality */
- ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+ ecore_sp_update_mcast_bin(p_ramrod, p_params);
ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
- ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
- p_params->sge_tpa_params);
+ ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
if (p_params->mtu) {
p_ramrod->common.update_mtu_flg = 1;
p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
* Note: crc32_length MUST be aligned to 8
* Return:
******************************************************************************/
-static u32 ecore_calc_crc32c(u8 *crc32_packet,
- u32 crc32_length, u32 crc32_seed, u8 complement)
+static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
{
u32 byte = 0, bit = 0, crc32_result = crc32_seed;
u8 msb = 0, current_byte = 0;
return crc32_result;
}
-static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
+static u32 ecore_crc32c_le(u32 seed, u8 *mac)
{
u32 packet_buf[2] = { 0 };
OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
- return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+ return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
}
u8 ecore_mcast_bin_from_mac(u8 *mac)
{
- u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
- mac, ETH_ALEN);
+ u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
return crc & 0xff;
}
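For clarity, a small sketch (not from the patch) of how the multicast bin index is derived: the 6-byte MAC is zero-padded to 8 bytes, CRC32c is computed over it with the ETH_MULTICAST_BIN_FROM_MAC_SEED seed, and the low byte selects one of 256 bins. The MAC value below is made up.

	u8 mc_mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
	u8 bin = ecore_mcast_bin_from_mac(mc_mac);	/* bin index in 0..255 */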
static enum _ecore_status_t
ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
struct ecore_filter_mcast *p_filter_cmd,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- u16 opaque_fid;
if (IS_VF(p_dev)) {
ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
continue;
}
- opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = ecore_sp_eth_filter_mcast(p_hwfn,
- opaque_fid,
p_filter_cmd,
comp_mode, p_comp_data);
if (rc != ECORE_SUCCESS)
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- struct ecore_eth_stats *p_stats,
- u16 statistics_bin)
+ struct ecore_eth_stats *p_stats)
{
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
{
__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
- __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
#ifndef ASIC_ONLY
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
u16 pq_id);
u8 ecore_mcast_bin_from_mac(u8 *mac);
-
-/**
- * @brief - ecore_configure_rfs_ntuple_filter
- *
- * This ramrod should be used to add or remove arfs hw filter
- *
- * @params p_hwfn
- * @params p_ptt
- * @params p_cb Used for ECORE_SPQ_MODE_CB,where client would initialize
- it with cookie and callback function address, if not
- using this mode then client must pass NULL.
- * @params p_addr p_addr is an actual packet header that needs to be
- * filter. It has to mapped with IO to read prior to
- * calling this, [contains 4 tuples- src ip, dest ip,
- * src port, dest port].
- * @params length length of p_addr header up to past the transport header.
- * @params qid receive packet will be directed to this queue.
- * @params vport_id
- * @params b_is_add flag to add or remove filter.
- *
- */
-enum _ecore_status_t
-ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_spq_comp_cb *p_cb,
- dma_addr_t p_addr, u16 length,
- u16 qid, u8 vport_id,
- bool b_is_add);
#endif
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove arfs hw filter
+ *
+ * @params p_hwfn
+ * @params p_cb Used for ECORE_SPQ_MODE_CB, where the client would
+ *              initialize it with a cookie and callback function address;
+ *              if not using this mode then the client must pass NULL.
+ * @params p_addr p_addr is an actual packet header that needs to be
+ *                filtered. It has to be mapped with IO to read prior to
+ *                calling this, [contains 4 tuples- src ip, dest ip,
+ *                src port, dest port].
+ * @params length length of the p_addr header up to past the transport header.
+ * @params qid receive packets will be directed to this queue.
+ * @params vport_id
+ * @params b_is_add flag to add or remove filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add);
#endif
return ECORE_SUCCESS;
}
-static void ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
- enum ecore_drv_role drv_role,
+static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
u8 *p_mfw_drv_role)
{
switch (drv_role) {
ECORE_LOAD_REQ_FORCE_ALL,
};
-static void ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
- enum ecore_load_req_force force_cmd,
+static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
u8 *p_mfw_force_cmd)
{
switch (force_cmd) {
in_params.drv_ver_0 = ECORE_VERSION;
in_params.drv_ver_1 = ecore_get_config_bitmap();
in_params.fw_ver = STORM_FW_VERSION;
- ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
in_params.drv_role = mfw_drv_role;
in_params.timeout_val = p_params->timeout_val;
- ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
- &mfw_force_cmd);
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
in_params.avoid_eng_reset = p_params->avoid_eng_reset;
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
- ecore_get_mfw_force_cmd(p_hwfn,
- ECORE_LOAD_REQ_FORCE_ALL,
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
&mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
¶m);
}
-static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
/* A single notification should be sent to upper driver in CMT mode */
if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_FAILURE_DETECTED:
- ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ ecore_mcp_handle_fan_failure(p_hwfn);
break;
case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
return ECORE_SUCCESS;
}
-void
-ecore_mcp_resc_lock_default_init(struct ecore_hwfn *p_hwfn,
- struct ecore_resc_lock_params *p_lock,
- struct ecore_resc_unlock_params *p_unlock,
- enum ecore_resc_lock resource,
- bool b_is_permanent)
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent)
{
if (p_lock != OSAL_NULL) {
OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
/**
* @brief - default initialization for lock/unlock resource structs
*
- * @param p_hwfn
* @param p_lock - lock params struct to be initialized; Can be OSAL_NULL
* @param p_unlock - unlock params struct to be initialized; Can be OSAL_NULL
* @param resource - the requested resource
 * @param b_is_permanent - disable retries & aging when set
*/
-void
-ecore_mcp_resc_lock_default_init(struct ecore_hwfn *p_hwfn,
- struct ecore_resc_lock_params *p_lock,
- struct ecore_resc_unlock_params *p_unlock,
- enum ecore_resc_lock resource,
- bool b_is_permanent);
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent);
/**
* @brief Learn of supported MFW features; To be done during early init
return -1;
}
-static enum _ecore_status_t
-ecore_mfw_update_tlvs(u8 tlv_group, struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_mfw_buf, u32 size)
+static enum _ecore_status_t ecore_mfw_update_tlvs(struct ecore_hwfn *p_hwfn,
+ u8 tlv_group, u8 *p_mfw_buf,
+ u32 size)
{
union ecore_mfw_tlv_data *p_tlv_data;
struct ecore_drv_tlv_hdr tlv;
/* Update the TLV values in the local buffer */
for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) {
if (tlv_group & id) {
- if (ecore_mfw_update_tlvs(id, p_hwfn, p_ptt, p_mfw_buf,
- size))
+ if (ecore_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
goto drv_done;
}
}
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
-static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
- void *cookie,
- union event_ring_data *data,
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
+ union event_ring_data OSAL_UNUSED * data,
u8 fw_return_code)
{
struct ecore_spq_comp_done *comp_done;
"CHANNEL_TLV_MAX"
};
-static u8 ecore_vf_calculate_legacy(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
{
u8 legacy = 0;
}
static struct ecore_queue_cid *
-ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
- struct ecore_vf_queue *p_queue)
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
int i;
ECORE_IOV_VALIDATE_Q_DISABLE,
};
-static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
+static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
u16 qid,
enum ecore_iov_validate_q_mode mode,
bool b_is_tx)
return false;
}
- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
- mode, false);
+ return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
return false;
}
- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
- mode, true);
+ return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
}
/* Is there at least 1 queue open? */
-static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
u8 i;
for (i = 0; i < p_vf->num_rxqs; i++)
- if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ if (ecore_iov_validate_queue_mode(p_vf, i,
ECORE_IOV_VALIDATE_Q_ENABLE,
false))
return true;
return false;
}
-static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
{
u8 i;
for (i = 0; i < p_vf->num_txqs; i++)
- if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ if (ecore_iov_validate_queue_mode(p_vf, i,
ECORE_IOV_VALIDATE_Q_ENABLE,
true))
return true;
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset, u16 type, u16 length)
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
{
struct channel_tlv *tl = (struct channel_tlv *)*offset;
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
- u16 length, u8 status)
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ u16 length,
+#else
+ u16 OSAL_UNUSED length,
+#endif
+ u8 status)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct ecore_dmae_params params;
USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}
-static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
- enum ecore_iov_vport_update_flag flag)
+static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
switch (flag) {
case ECORE_IOV_VP_UPDATE_ACTIVATE:
size = sizeof(struct pfvf_def_resp_tlv);
total_len = size;
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
if (!(tlvs_mask & (1 << i)))
continue;
- resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
- ecore_iov_vport_to_tlv(p_hwfn, i), size);
+ resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
+ size);
if (tlvs_accepted & (1 << i))
resp->hdr.status = status;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - vport_update resp: TLV %d, status %02x\n",
p_vf->relative_vf_id,
- ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+ ecore_iov_vport_to_tlv(i),
+ resp->hdr.status);
total_len += size;
}
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
return total_len;
mbx->offset = (u8 *)mbx->reply_virt;
- ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, type, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
return PFVF_STATUS_SUCCESS;
}
-static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
- struct pfvf_stats_info *p_stats)
+static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
{
p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
OFFSETOF(struct mstorm_vf_zone,
if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
- ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+ ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
ETH_ALEN);
/* Fill resources available to VF; Make sure there are enough to
* satisfy the VF's request.
*/
- vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+ vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, vf,
&req->resc_request, resc);
if (vfpf_status != PFVF_STATUS_SUCCESS)
goto out;
struct ecore_queue_cid *p_cid = OSAL_NULL;
/* There can be at most 1 Rx queue on qzone. Find it */
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
- p_queue);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
if (p_cid == OSAL_NULL)
continue;
vf->vport_instance--;
vf->spoof_chk = false;
- if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
- (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+ if ((ecore_iov_validate_active_rxq(vf)) ||
+ (ecore_iov_validate_active_txq(vf))) {
vf->b_malicious = true;
DP_NOTICE(p_hwfn, false,
"VF [%02x] - considered malicious;"
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
if (p_queue->cids[qid_usage_idx].p_cid)
goto out;
- vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
+ vf_legacy = ecore_vf_calculate_legacy(vf);
/* Acquire a new queue-cid */
OSAL_MEMSET(¶ms, 0, sizeof(params));
}
send_resp:
- p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+ p_resp = ecore_add_tlv(&mbx->offset,
CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
if (p_queue->cids[qid_usage_idx].p_cid)
goto out;
- vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
+ vf_legacy = ecore_vf_calculate_legacy(vf);
/* Acquire a new queue-cid */
params.queue_id = p_queue->fw_tx_qid;
p_queue->cids[qid_usage_idx].b_is_tx) {
struct ecore_queue_cid *p_cid;
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf, p_queue);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
vf->relative_vf_id, rxq_id, qid_usage_idx,
goto out;
}
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[q_idx]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
p_rss->rss_ind_table[i] = p_cid;
}
static void
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_sge_tpa_params *p_sge_tpa,
struct ecore_iov_vf_mbx *p_mbx,
ecore_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask);
- ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, ¶ms,
+ ecore_iov_vp_update_sge_tpa_param(p_hwfn, ¶ms,
&sge_tpa_params, mbx, &tlvs_mask);
tlvs_accepted = tlvs_mask;
vf->abs_vf_id, rx_coal, tx_coal, qid);
if (rx_coal) {
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[qid]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
if (rc != ECORE_SUCCESS) {
vf->abs_vf_id, rx_coal, tx_coal, qid);
if (rx_coal) {
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[qid]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
if (rc != ECORE_SUCCESS) {
p_bulletin = p_vf->bulletin.p_virt;
if (p_params)
- __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ __ecore_vf_get_link_params(p_params, p_bulletin);
if (p_link)
- __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ __ecore_vf_get_link_state(p_link, p_bulletin);
if (p_caps)
- __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ __ecore_vf_get_link_caps(p_caps, p_bulletin);
}
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
/**
* @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
*
- * @param p_hwfn
- * @param p_iov
+ * @param offset
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset,
- u16 type,
- u16 length);
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length);
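A hedged sketch of the TLV-chain pattern this simplified helper serves (the buffer comes from the mailbox reply area, as elsewhere in this patch; the TLV type and size are chosen only for illustration): each ecore_add_tlv() call places one TLV at *offset and advances the offset, and the chain is always terminated with CHANNEL_TLV_LIST_END.

	u8 *offset = (u8 *)mbx->reply_virt;	/* assumed reply buffer */

	ecore_add_tlv(&offset, CHANNEL_TLV_VPORT_UPDATE,
		      sizeof(struct pfvf_def_resp_tlv));
	ecore_add_tlv(&offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));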
/**
* @brief list the types and lengths of the tlvs on the buffer
OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
/* Init type and length */
- p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);
+ p_tlv = ecore_add_tlv(&p_iov->offset, type, length);
/* Init first tlv header */
((struct vfpf_first_tlv *)p_tlv)->reply_address =
PFVF_ACQUIRE_CAP_QUEUE_QIDS))
return;
- p_qid_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_qid_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
p_qid_tlv->qid = p_cid->qid_usage_idx;
}
req->bulletin_size = p_iov->bulletin.size;
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_vf_pf_add_qid(p_hwfn, *pp_cid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
}
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
sizeof(struct vfpf_first_tlv));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
struct vfpf_vport_update_activate_tlv *p_act_tlv;
size = sizeof(struct vfpf_vport_update_activate_tlv);
- p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_act_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
- p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_vlan_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
- p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset,
tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
- p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_mcast_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_MCAST,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
size = sizeof(struct vfpf_vport_update_accept_param_tlv);
- p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
if (update_rx) {
int i, table_size;
size = sizeof(struct vfpf_vport_update_rss_tlv);
- p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_rss_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_RSS, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
- p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
- tlv, size);
+ p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
sge_tpa_params = p_params->sge_tpa_params;
size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
- p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
}
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
req->vlan = p_ucast->vlan;
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
sizeof(struct vfpf_first_tlv));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
rx_coal, tx_coal, req->qid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
return ECORE_SUCCESS;
}
-void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_params *p_params,
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin)
{
OSAL_MEMSET(p_params, 0, sizeof(*p_params));
void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *params)
{
- __ecore_vf_get_link_params(p_hwfn, params,
+ __ecore_vf_get_link_params(params,
&p_hwfn->vf_iov_info->bulletin_shadow);
}
-void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_state *p_link,
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin)
{
OSAL_MEMSET(p_link, 0, sizeof(*p_link));
void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *link)
{
- __ecore_vf_get_link_state(p_hwfn, link,
+ __ecore_vf_get_link_state(link,
&p_hwfn->vf_iov_info->bulletin_shadow);
}
-void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_capabilities *p_link_caps,
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin)
{
OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps)
{
- __ecore_vf_get_link_caps(p_hwfn, p_link_caps,
+ __ecore_vf_get_link_caps(p_link_caps,
&p_hwfn->vf_iov_info->bulletin_shadow);
}
/**
* @brief - return the link params in a given bulletin board
*
- * @param p_hwfn
* @param p_params - pointer to a struct to fill with link params
* @param p_bulletin
*/
-void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_params *p_params,
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link state in a given bulletin board
*
- * @param p_hwfn
* @param p_link - pointer to a struct to fill with link state
* @param p_bulletin
*/
-void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_state *p_link,
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link capabilities in a given bulletin board
*
- * @param p_hwfn
* @param p_link - pointer to a struct to fill with link capabilities
* @param p_bulletin
*/
-void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_capabilities *p_link_caps,
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
enum _ecore_status_t
&qdev->fdir_info.arfs);
}
/* configure filter with ECORE_SPQ_MODE_EBLOCK */
- rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, NULL,
+ rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
(dma_addr_t)mz->phys_addr,
pkt_len,
fdir_filter->action.rx_queue,