#include "ice_protocol_type.h"
#include "ice_flow.h"
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
static const struct ice_tunnel_type_scan tnls[] = {
- { TNL_VXLAN, "TNL_VXLAN" },
- { TNL_GTPC, "TNL_GTPC" },
- { TNL_GTPC_TEID, "TNL_GTPC_TEID" },
- { TNL_GTPU, "TNL_GTPC" },
- { TNL_GTPU_TEID, "TNL_GTPU_TEID" },
- { TNL_VXLAN_GPE, "TNL_VXLAN_GPE" },
- { TNL_GENEVE, "TNL_GENEVE" },
- { TNL_NAT, "TNL_NAT" },
- { TNL_ROCE_V2, "TNL_ROCE_V2" },
- { TNL_MPLSO_UDP, "TNL_MPLSO_UDP" },
- { TNL_UDP2_END, "TNL_UDP2_END" },
- { TNL_UPD_END, "TNL_UPD_END" },
+ /* NOTE(review): only VXLAN and GENEVE are scanned now — presumably these
+  * are the only tunnel types the package publishes with PF-suffixed labels;
+  * confirm against the DDP package label list.
+  */
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
{ TNL_LAST, "" }
};
while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
for (i = 0; tnls[i].type != TNL_LAST; i++) {
- if (!strncmp(label_name, tnls[i].label_prefix,
- strlen(tnls[i].label_prefix))) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for a matching label prefix before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
hw->tnl.tbl[hw->tnl.count].valid = false;
hw->tnl.tbl[hw->tnl.count].in_use = false;
*
* This function will request ownership of the change lock.
*/
-static enum ice_status
+enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_change_lock");
*
* This function will release the change lock using the proper Admin Command.
*/
-static void ice_release_change_lock(struct ice_hw *hw)
+void ice_release_change_lock(struct ice_hw *hw)
{
ice_debug(hw, ICE_DBG_TRACE, "ice_release_change_lock");
enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
- struct ice_aqc_get_pkg_info_resp *pkg_info;
struct ice_global_metadata_seg *meta_seg;
struct ice_generic_seg_hdr *seg_hdr;
- enum ice_status status;
- u16 size;
- u32 i;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
if (!pkg_hdr)
return ICE_ERR_CFG;
}
-#define ICE_PKG_CNT 4
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+enum ice_status
+ice_get_pkg_info(struct ice_hw *hw)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg_info;
+ enum ice_status status;
+ u16 size;
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
(ICE_PKG_CNT - 1));
pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
ice_init_fd_mask_regs(hw);
}
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @hw: pointer to the hardware structure
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions. The update and draft components are included in the error
+ * message but are not part of the compatibility check.
+ */
+static enum ice_status
+ice_chk_pkg_version(struct ice_hw *hw, struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
+     pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) {
+ ice_info(hw, "ERROR: Incompatible package: %d.%d.%d.%d - requires package version: %d.%d.*.*\n",
+ pkg_ver->major, pkg_ver->minor, pkg_ver->update,
+ pkg_ver->draft, ICE_PKG_SUPP_VER_MAJ,
+ ICE_PKG_SUPP_VER_MNR);
+
+ return ICE_ERR_NOT_SUPPORTED;
+ }
+
+ return ICE_SUCCESS;
+}
+
/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
if (status)
return status;
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ status = ice_chk_pkg_version(hw, &hw->pkg_ver);
+ if (status)
+ return status;
+
/* find segment in given package */
seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
if (!seg) {
status = ICE_SUCCESS;
}
+ /* Get information on the package currently loaded in HW, then make sure
+ * the driver is compatible with this version.
+ */
+ if (!status) {
+ status = ice_get_pkg_info(hw);
+ if (!status)
+ status = ice_chk_pkg_version(hw, &hw->active_pkg_ver);
+ }
+
if (!status) {
hw->seg = seg;
- /* on successful package download, update other required
- * registers to support the package
+ /* on successful package download update other required
+ * registers to support the package and fill HW tables
+ * with package content.
*/
ice_init_pkg_regs(hw);
+ ice_fill_blk_tbls(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
status);
ice_free(hw, bld);
}
+/**
+ * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
+ * @hw: pointer to the hardware structure
+ * @blk: hardware block
+ * @prof: profile ID
+ * @fv_idx: field vector word index
+ * @prot: variable to receive the protocol ID
+ * @off: variable to receive the protocol offset
+ *
+ * Returns ICE_ERR_PARAM when @prof or @fv_idx is out of range for the block;
+ * @prot and @off are written only on success.
+ */
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u8 fv_idx,
+   u8 *prot, u16 *off)
+{
+ struct ice_fv_word *fv_ext;
+
+ if (prof >= hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (fv_idx >= hw->blk[blk].es.fvw)
+ return ICE_ERR_PARAM;
+
+ /* index into the flat FV table: one row of 'fvw' words per profile */
+ fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
+
+ *prot = fv_ext[fv_idx].prot_id;
+ *off = fv_ext[fv_idx].off;
+
+ return ICE_SUCCESS;
+}
+
/* PTG Management */
/**
ice_free(hw, del);
}
+ /* if VSIG characteristic list was cleared for reset
+ * re-initialize the list head
+ */
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+
return ICE_SUCCESS;
}
}
};
+/**
+ * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static
+void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 pt;
+
+ for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
+ u8 ptg;
+
+ ptg = hw->blk[blk].xlt1.t[pt];
+ /* ptypes left on the default PTG need no explicit allocation */
+ if (ptg != ICE_DEFAULT_PTG) {
+ ice_ptg_alloc_val(hw, blk, ptg);
+ ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
+ }
+ }
+}
+
+/**
+ * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 vsi;
+
+ for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
+ u16 vsig;
+
+ vsig = hw->blk[blk].xlt2.t[vsi];
+ /* NOTE(review): VSIG 0 appears to mean "no VSIG assigned" — confirm */
+ if (vsig) {
+ ice_vsig_alloc_val(hw, blk, vsig);
+ ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+ /* no changes at this time, since this has been
+  * initialized from the original package
+  */
+ hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+ }
+ }
+}
+
+/**
+ * ice_init_sw_db - init software database from HW tables
+ * @hw: pointer to the hardware structure
+ *
+ * Rebuilds the SW-side XLT1 (ptype->PTG) and XLT2 (VSI->VSIG) state for
+ * every block from the tables just read out of the package.
+ */
+static void ice_init_sw_db(struct ice_hw *hw)
+{
+ u16 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ ice_init_sw_xlt1_db(hw, (enum ice_block)i);
+ ice_init_sw_xlt2_db(hw, (enum ice_block)i);
+ }
+}
+
/**
* ice_fill_tbl - Reads content of a single table type into database
* @hw: pointer to the hardware structure
case ICE_SID_FLD_VEC_PE:
es = (struct ice_sw_fv_section *)sect;
src = (u8 *)es->fv;
- sect_len = LE16_TO_CPU(es->count) *
- hw->blk[block_id].es.fvw *
+ sect_len = (u32)(LE16_TO_CPU(es->count) *
+ hw->blk[block_id].es.fvw) *
sizeof(*hw->blk[block_id].es.t);
dst = (u8 *)hw->blk[block_id].es.t;
- dst_len = hw->blk[block_id].es.count *
- hw->blk[block_id].es.fvw *
+ dst_len = (u32)(hw->blk[block_id].es.count *
+ hw->blk[block_id].es.fvw) *
sizeof(*hw->blk[block_id].es.t);
break;
default:
}
/**
- * ice_fill_blk_tbls - Read package content for tables of a block
+ * ice_fill_blk_tbls - Read package content for tables
 * @hw: pointer to the hardware structure
- * @block_id: The block ID which contains the tables to be copied
 *
 * Reads the current package contents and populates the driver
- * database with the data it contains to allow for advanced driver
- * features.
- */
-static void ice_fill_blk_tbls(struct ice_hw *hw, enum ice_block block_id)
-{
- ice_fill_tbl(hw, block_id, hw->blk[block_id].xlt1.sid);
- ice_fill_tbl(hw, block_id, hw->blk[block_id].xlt2.sid);
- ice_fill_tbl(hw, block_id, hw->blk[block_id].prof.sid);
- ice_fill_tbl(hw, block_id, hw->blk[block_id].prof_redir.sid);
- ice_fill_tbl(hw, block_id, hw->blk[block_id].es.sid);
-}
-
-/**
- * ice_free_flow_profs - free flow profile entries
- * @hw: pointer to the hardware structure
+ * database with the data iteratively for all advanced feature
+ * blocks. Assume that the HW tables have been allocated.
 */
-static void ice_free_flow_profs(struct ice_hw *hw)
+void ice_fill_blk_tbls(struct ice_hw *hw)
{
u8 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
- struct ice_flow_prof *p, *tmp;
-
- if (!&hw->fl_profs[i])
- continue;
-
- /* This call is being made as part of resource deallocation
- * during unload. Lock acquire and release will not be
- * necessary here.
- */
- LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[i],
- ice_flow_prof, l_entry) {
- struct ice_flow_entry *e, *t;
+ enum ice_block blk_id = (enum ice_block)i;
- LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
- ice_flow_entry, l_entry)
- ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));
-
- LIST_DEL(&p->l_entry);
- if (p->acts)
- ice_free(hw, p->acts);
- ice_free(hw, p);
- }
-
- ice_destroy_lock(&hw->fl_profs_locks[i]);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
}
+
+ /* derive SW-side PTG/VSIG state from the freshly-read HW tables */
+ ice_init_sw_db(hw);
}
/**
- * ice_free_prof_map - frees the profile map
+ * ice_free_flow_profs - free flow profile entries
 * @hw: pointer to the hardware structure
- * @blk: the HW block which contains the profile map to be freed
+ * @blk_idx: HW block index
+ *
+ * Frees every flow profile on the block's list, including each profile's
+ * entries and actions, then re-initializes the list head so it is usable
+ * again after a reset.
 */
-static void ice_free_prof_map(struct ice_hw *hw, enum ice_block blk)
+static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
{
- struct ice_prof_map *del, *tmp;
+ struct ice_flow_prof *p, *tmp;
- if (LIST_EMPTY(&hw->blk[blk].es.prof_map))
- return;
+ /* This call is being made as part of resource deallocation
+  * during unload. Lock acquire and release will not be
+  * necessary here.
+  */
+ LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
+ ice_flow_prof, l_entry) {
+ struct ice_flow_entry *e, *t;
- LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &hw->blk[blk].es.prof_map,
- ice_prof_map, list) {
- ice_rem_prof(hw, blk, del->profile_cookie);
+ LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
+ ice_flow_entry, l_entry)
+ ice_flow_rem_entry(hw, ICE_FLOW_ENTRY_HNDL(e));
+
+ LIST_DEL(&p->l_entry);
+ if (p->acts)
+ ice_free(hw, p->acts);
+ ice_free(hw, p);
}
+
+ /* if driver is in reset and tables are being cleared
+  * re-initialize the flow profile list heads
+  */
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
}
/**
*/
void ice_free_hw_tbls(struct ice_hw *hw)
{
+ struct ice_rss_cfg *r, *rt;
u8 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
- ice_free_prof_map(hw, (enum ice_block)i);
+ if (hw->blk[i].is_list_init) {
+ struct ice_es *es = &hw->blk[i].es;
+ struct ice_prof_map *del, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
+ ice_prof_map, list) {
+ LIST_DEL(&del->list);
+ ice_free(hw, del);
+ }
+
+ ice_destroy_lock(&es->prof_map_lock);
+ ice_free_flow_profs(hw, i);
+ ice_destroy_lock(&hw->fl_profs_locks[i]);
+ hw->blk[i].is_list_init = false;
+ }
ice_free_vsig_tbl(hw, (enum ice_block)i);
ice_free(hw, hw->blk[i].xlt1.ptypes);
ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
ice_free(hw, hw->blk[i].es.written);
}
+ LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
+ ice_rss_cfg, l_entry) {
+ LIST_DEL(&r->l_entry);
+ ice_free(hw, r);
+ }
+ ice_destroy_lock(&hw->rss_locks);
ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
-
- ice_free_flow_profs(hw);
}
/**
 * ice_init_flow_profs - init flow profile locks and list heads
 * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
 */
-static void ice_init_flow_profs(struct ice_hw *hw)
-{
- u8 i;
-
- for (i = 0; i < ICE_BLK_COUNT; i++) {
- ice_init_lock(&hw->fl_profs_locks[i]);
- INIT_LIST_HEAD(&hw->fl_profs[i]);
- }
-}
-
-/**
- * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
- * @hw: pointer to the hardware structure
- * @blk: the HW block to initialize
- */
-static
-void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
{
- u16 pt;
-
- for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
- u8 ptg;
-
- ptg = hw->blk[blk].xlt1.t[pt];
- if (ptg != ICE_DEFAULT_PTG) {
- ice_ptg_alloc_val(hw, blk, ptg);
- ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
- }
- }
-}
-
-/**
- * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
- * @hw: pointer to the hardware structure
- * @blk: the HW block to initialize
- */
-static
-void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
-{
- u16 vsi;
-
- for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
- u16 vsig;
-
- vsig = hw->blk[blk].xlt2.t[vsi];
- if (vsig) {
- ice_vsig_alloc_val(hw, blk, vsig);
- ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
- /* no changes at this time, since this has been
- * initialized from the original package
- */
- hw->blk[blk].xlt2.vsis[vsi].changed = 0;
- }
- }
-}
-
-/**
- * ice_init_sw_db - init software database from HW tables
- * @hw: pointer to the hardware structure
- */
-static
-void ice_init_sw_db(struct ice_hw *hw)
-{
- u16 i;
-
- for (i = 0; i < ICE_BLK_COUNT; i++) {
- ice_init_sw_xlt1_db(hw, (enum ice_block)i);
- ice_init_sw_xlt2_db(hw, (enum ice_block)i);
- }
+ /* per-block lock and list head; torn down in ice_free_hw_tbls() */
+ ice_init_lock(&hw->fl_profs_locks[blk_idx]);
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
}
/**
{
u8 i;
- ice_init_flow_profs(hw);
-
+ ice_init_lock(&hw->rss_locks);
+ INIT_LIST_HEAD(&hw->rss_list_head);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
struct ice_es *es = &hw->blk[i].es;
+ u16 j;
+
+ if (hw->blk[i].is_list_init)
+ continue;
+
+ ice_init_flow_profs(hw, i);
+ ice_init_lock(&es->prof_map_lock);
+ INIT_LIST_HEAD(&es->prof_map);
+ hw->blk[i].is_list_init = true;
hw->blk[i].overwrite = blk_sizes[i].overwrite;
es->reverse = blk_sizes[i].reverse;
if (!xlt2->vsig_tbl)
goto err;
+ for (j = 0; j < xlt2->count; j++)
+ INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
+
xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
if (!xlt2->t)
goto err;
es->count = blk_sizes[i].es;
es->fvw = blk_sizes[i].fvw;
es->t = (struct ice_fv_word *)
- ice_calloc(hw, es->count * es->fvw, sizeof(*es->t));
-
+ ice_calloc(hw, (u32)(es->count * es->fvw),
+ sizeof(*es->t));
if (!es->t)
goto err;
if (!es->ref_count)
goto err;
-
- INIT_LIST_HEAD(&es->prof_map);
-
- /* Now that tables are allocated, read in package data */
- ice_fill_blk_tbls(hw, (enum ice_block)i);
}
-
- ice_init_sw_db(hw);
-
return ICE_SUCCESS;
err:
idx = (j * 4) + k;
if (used[idx])
- raw_entry |= used[idx] << (k * 8);
+ raw_entry |= used[idx] << (k * BITS_PER_BYTE);
}
/* write the appropriate register set, based on HW block */
u32 byte = 0;
u8 prof_id;
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+
/* search for existing profile */
status = ice_find_prof_id(hw, blk, es, &prof_id);
if (status) {
u16 ptype;
u8 m;
- ptype = byte * 8 + bit;
+ ptype = byte * BITS_PER_BYTE + bit;
if (ptype < ICE_FLOW_PTYPE_MAX) {
prof->ptype[prof->ptype_count] = ptype;
bytes--;
byte++;
}
- LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
- return ICE_SUCCESS;
+ LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
+ status = ICE_SUCCESS;
err_ice_add_prof:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
return status;
}
/**
- * ice_search_prof_id - Search for a profile tracking ID
+ * ice_search_prof_id_low - Search for a profile tracking ID low level
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
*
- * This will search for a profile tracking ID which was previously added.
+ * This will search for a profile tracking ID which was previously added. This
+ * version assumes that the caller has already acquired the prof map lock.
*/
-struct ice_prof_map *
-ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
+static struct ice_prof_map *
+ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
struct ice_prof_map *map;
return entry;
}
+/**
+ * ice_search_prof_id - Search for a profile tracking ID
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added.
+ * Acquires the prof map lock internally; callers already holding the lock
+ * must use ice_search_prof_id_low() instead.
+ */
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry;
+
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+ entry = ice_search_prof_id_low(hw, blk, id);
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+
+ return entry;
+}
+
/**
* ice_set_prof_context - Set context for a given profile
* @hw: pointer to the HW struct
*/
enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
- enum ice_status status;
struct ice_prof_map *pmap;
+ enum ice_status status;
- pmap = ice_search_prof_id(hw, blk, id);
- if (!pmap)
- return ICE_ERR_DOES_NOT_EXIST;
+ /* hold the prof map lock across lookup and removal so the entry
+  * cannot be removed out from under us
+  */
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+
+ pmap = ice_search_prof_id_low(hw, blk, id);
+ if (!pmap) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_rem_prof;
+ }
/* remove all flows with this profile */
status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
if (status)
- return status;
+ goto err_ice_rem_prof;
- /* remove profile */
- status = ice_free_prof_id(hw, blk, pmap->prof_id);
- if (status)
- return status;
+ /* NOTE(review): the explicit ice_free_prof_id() call was dropped —
+  * presumably ice_prof_dec_ref() frees the profile ID on the last
+  * reference; confirm before merging.
+  */
/* dereference profile, and possibly remove */
ice_prof_dec_ref(hw, blk, pmap->prof_id);
LIST_DEL(&pmap->list);
ice_free(hw, pmap);
- return ICE_SUCCESS;
+ status = ICE_SUCCESS;
+
+err_ice_rem_prof:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
/**