OSAL_FREE(p_dev, p_dev->reset_stats);
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
- OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
- }
-
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
if (!p_dev->fw_data)
return ECORE_NOMEM;
- /* Allocate Memory for the Queue->CID mapping */
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- u32 num_tx_conns = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
- int tx_size, rx_size;
-
- /* @@@TMP - resc management, change to actual required size */
- if (p_hwfn->pf_params.eth_pf_params.num_cons > num_tx_conns)
- num_tx_conns = p_hwfn->pf_params.eth_pf_params.num_cons;
- tx_size = sizeof(struct ecore_hw_cid_data) * num_tx_conns;
- rx_size = sizeof(struct ecore_hw_cid_data) *
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
-
- p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- tx_size);
- if (!p_hwfn->p_tx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Tx Cids\n");
- goto alloc_no_mem;
- }
-
- p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- rx_size);
- if (!p_hwfn->p_rx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Rx Cids\n");
- goto alloc_no_mem;
- }
- }
-
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
u32 n_eqes, num_cons;
static enum _ecore_status_t
ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
int hw_mode,
bool b_hw_start,
enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
p_hwfn->mcp_info->mfw_mb_length);
}
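+/* VF counterpart of the PF init flow: forward any tunnelling
+ * configuration to the PF and mark interrupts enabled for this hwfn.
+ */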
+enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_init_params *p_params)
+{
+ if (p_params->p_tunn) {
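+ /* Build the tunnel update request and send it to the PF */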
+ ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = 1;
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
}
if (IS_VF(p_dev)) {
- p_hwfn->b_int_enabled = 1;
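+ /* VFs negotiate tunnel config with the PF and enable interrupts */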
+ ecore_vf_start(p_hwfn, p_params);
continue;
}
struct ecore_chain *p_chain)
{
void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
- u8 *p_pbl_virt = (u8 *)p_chain->pbl.p_virt_table;
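+ /* PBL table: one DMA address entry per chain page */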
+ u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
if (!pp_virt_addr_tbl)
return;
- if (!p_chain->pbl.p_virt_table)
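+ /* No PBL table was allocated; only the shadow table needs freeing */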
+ if (!p_pbl_virt)
goto out;
for (i = 0; i < page_cnt; i++) {
pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
- if (!p_chain->pbl.external)
- OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
- p_chain->pbl.p_phys_table, pbl_size);
-out:
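+ /* An externally provided PBL is owned (and freed) by the caller */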
+ if (!p_chain->b_external_pbl)
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table, pbl_size);
+ out:
OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
}
} else {
p_pbl_virt = ext_pbl->p_pbl_virt;
p_pbl_phys = ext_pbl->p_pbl_phys;
- p_chain->pbl.external = true;
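+ /* Mark the PBL caller-owned so chain teardown leaves it intact */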
+ p_chain->b_external_pbl = true;
}
ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,