X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fbase%2Fice_flex_pipe.c;h=3631ddba2c060b3012598dce3518927012c274d6;hb=8e9091f57e9cda8256cfc86f2db101fa38c78111;hp=851f0273b010f0265fd2a082e0c8044bf5a46f3d;hpb=c4afa96e4de1f9da0379022ad3f86ff2e34bc12b;p=dpdk.git

diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 851f0273b0..3631ddba2c 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
+ * Copyright(c) 2001-2021 Intel Corporation
  */
 
 #include "ice_common.h"
@@ -7,12 +7,21 @@
 #include "ice_protocol_type.h"
 #include "ice_flow.h"
 
+/* To support double VLAN mode, certain boost TCAM entries must be enabled
+ * or disabled. The metadata label names that match the following prefixes
+ * will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE	"BOOST_MAC_VLAN_DVM"	/* enable these entries */
+#define ICE_SVM_PRE	"BOOST_MAC_VLAN_SVM"	/* disable these entries */
+
 /* To support tunneling entries by PF, the package will append the PF number to
  * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
  */
+#define ICE_TNL_PRE	"TNL_"
 static const struct ice_tunnel_type_scan tnls[] = {
 	{ TNL_VXLAN,		"TNL_VXLAN_PF" },
 	{ TNL_GENEVE,		"TNL_GENEVE_PF" },
+	{ TNL_ECPRI,		"TNL_UDP_ECPRI_PF" },
 	{ TNL_LAST,		"" }
 };
 
@@ -315,6 +324,83 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
 	return entry;
 }
 
+/**
+ * ice_hw_ptype_ena - check if the PTYPE is enabled or not
+ * @hw: pointer to the HW structure
+ * @ptype: the hardware PTYPE
+ */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
+{
+	return ptype < ICE_FLOW_PTYPE_MAX &&
+	       ice_is_bit_set(hw->hw_ptype, ptype);
+}
+
+/**
+ * ice_marker_ptype_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the Marker PType TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
+
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual Marker PType TCAM entries.
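+ *
+ * Illustrative use only (a sketch mirroring ice_fill_hw_ptype() below,
+ * not a call site added by this patch); the handler is never called
+ * directly but supplied as the enumeration callback:
+ *
+ *	struct ice_pkg_enum state;
+ *	void *ent;
+ *
+ *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+ *	ent = ice_pkg_enum_entry(hw->seg, &state,
+ *				 ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+ *				 ice_marker_ptype_tcam_handler);
+ *	(pass a NULL ice_seg on subsequent calls to continue the scan)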
+ */ +static void * +ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_marker_ptype_tcam_section *marker_ptype; + + if (!section) + return NULL; + + if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) + return NULL; + + if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + marker_ptype = (struct ice_marker_ptype_tcam_section *)section; + if (index >= LE16_TO_CPU(marker_ptype->count)) + return NULL; + + return marker_ptype->tcam + index; +} + +/** + * ice_fill_hw_ptype - fill the enabled PTYPE bit information + * @hw: pointer to the HW structure + */ +static void +ice_fill_hw_ptype(struct ice_hw *hw) +{ + struct ice_marker_ptype_tcam_entry *tcam; + struct ice_seg *seg = hw->seg; + struct ice_pkg_enum state; + + ice_zero_bitmap(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); + if (!seg) + return; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + do { + tcam = (struct ice_marker_ptype_tcam_entry *) + ice_pkg_enum_entry(seg, &state, + ICE_SID_RXPARSER_MARKER_PTYPE, NULL, + ice_marker_ptype_tcam_handler); + if (tcam && + LE16_TO_CPU(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && + LE16_TO_CPU(tcam->ptype) < ICE_FLOW_PTYPE_MAX) + ice_set_bit(LE16_TO_CPU(tcam->ptype), hw->hw_ptype); + + seg = NULL; + } while (tcam); +} + /** * ice_boost_tcam_handler * @sect_type: section type @@ -452,6 +538,57 @@ ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, return label->name; } +/** + * ice_add_tunnel_hint + * @hw: pointer to the HW structure + * @label_name: label text + * @val: value of the tunnel port boost entry + */ +static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) +{ + if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { + u16 i; + + for (i = 0; tnls[i].type != TNL_LAST; i++) { + size_t len = strlen(tnls[i].label_prefix); + + /* Look for matching label start, before continuing */ + if (strncmp(label_name, tnls[i].label_prefix, len)) + continue; + + /* Make sure this label matches our PF. Note that the PF + * character ('0' - '7') will be located where our + * prefix string's null terminator is located. 
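+		 *
+		 * For example, with label "TNL_VXLAN_PF3" and prefix
+		 * "TNL_VXLAN_PF" (len == 12), label_name[len] is '3',
+		 * so only the PF with pf_id 3 records this entry.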
+ */ + if ((label_name[len] - '0') == hw->pf_id) { + hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; + hw->tnl.tbl[hw->tnl.count].valid = false; + hw->tnl.tbl[hw->tnl.count].in_use = false; + hw->tnl.tbl[hw->tnl.count].marked = false; + hw->tnl.tbl[hw->tnl.count].boost_addr = val; + hw->tnl.tbl[hw->tnl.count].port = 0; + hw->tnl.count++; + break; + } + } + } +} + +/** + * ice_add_dvm_hint + * @hw: pointer to the HW structure + * @val: value of the boost entry + * @enable: true if entry needs to be enabled, or false if needs to be disabled + */ +static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable) +{ + if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) { + hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val; + hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable; + hw->dvm_upd.count++; + } +} + /** * ice_init_pkg_hints * @hw: pointer to the HW structure @@ -478,40 +615,34 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, &val); - while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { - for (i = 0; tnls[i].type != TNL_LAST; i++) { - size_t len = strlen(tnls[i].label_prefix); + while (label_name) { + if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) + /* check for a tunnel entry */ + ice_add_tunnel_hint(hw, label_name, val); - /* Look for matching label start, before continuing */ - if (strncmp(label_name, tnls[i].label_prefix, len)) - continue; + /* check for a dvm mode entry */ + else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE))) + ice_add_dvm_hint(hw, val, true); - /* Make sure this label matches our PF. Note that the PF - * character ('0' - '7') will be located where our - * prefix string's null terminator is located. - */ - if ((label_name[len] - '0') == hw->pf_id) { - hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; - hw->tnl.tbl[hw->tnl.count].valid = false; - hw->tnl.tbl[hw->tnl.count].in_use = false; - hw->tnl.tbl[hw->tnl.count].marked = false; - hw->tnl.tbl[hw->tnl.count].boost_addr = val; - hw->tnl.tbl[hw->tnl.count].port = 0; - hw->tnl.count++; - break; - } - } + /* check for a svm mode entry */ + else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE))) + ice_add_dvm_hint(hw, val, false); label_name = ice_enum_labels(NULL, 0, &state, &val); } - /* Cache the appropriate boost TCAM entry pointers */ + /* Cache the appropriate boost TCAM entry pointers for tunnels */ for (i = 0; i < hw->tnl.count; i++) { ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, &hw->tnl.tbl[i].boost_entry); if (hw->tnl.tbl[i].boost_entry) hw->tnl.tbl[i].valid = true; } + + /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ + for (i = 0; i < hw->dvm_upd.count; i++) + ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, + &hw->dvm_upd.tbl[i].boost_entry); } /* Key creation */ @@ -648,7 +779,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) * This function generates a key from a value, a don't care mask and a never * match mask. 
* upd, dc, and nm are optional parameters, and can be NULL: - * upd == NULL --> udp mask is all 1's (update all bits) + * upd == NULL --> upd mask is all 1's (update all bits) * dc == NULL --> dc mask is all 0's (no don't care bits) * nm == NULL --> nm mask is all 0's (no never match bits) */ @@ -712,8 +843,7 @@ ice_acquire_global_cfg_lock(struct ice_hw *hw, ICE_GLOBAL_CFG_LOCK_TIMEOUT); if (status == ICE_ERR_AQ_NO_WORK) - ice_debug(hw, ICE_DBG_PKG, - "Global config lock: No work to do\n"); + ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); return status; } @@ -808,6 +938,28 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, return status; } +/** + * ice_aq_upload_section + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer which will receive the section + * @buf_size: the size of the package buffer + * @cd: pointer to command details structure or NULL + * + * Upload Section (0x0C41) + */ +enum ice_status +ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +} + /** * ice_aq_update_pkg * @hw: pointer to the hardware structure @@ -895,38 +1047,54 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, } /** - * ice_update_pkg + * ice_update_pkg_no_lock * @hw: pointer to the hardware structure * @bufs: pointer to an array of buffers * @count: the number of buffers in the array - * - * Obtains change lock and updates package. */ -enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +static enum ice_status +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - enum ice_status status; - u32 offset, info, i; - - status = ice_acquire_change_lock(hw, ICE_RES_WRITE); - if (status) - return status; + enum ice_status status = ICE_SUCCESS; + u32 i; for (i = 0; i < count; i++) { struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); bool last = ((i + 1) == count); + u32 offset, info; status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), last, &offset, &info, NULL); if (status) { - ice_debug(hw, ICE_DBG_PKG, - "Update pkg failed: err %d off %d inf %d\n", + ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", status, offset, info); break; } } + return status; +} + +/** + * ice_update_pkg + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains change lock and updates package. 
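+ *
+ * This is now a thin wrapper; callers that must avoid re-taking the lock
+ * (such as ice_upd_dvm_boost_entry() below) call ice_update_pkg_no_lock()
+ * directly. The equivalent open-coded sequence would be:
+ *
+ *	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ *	if (!status) {
+ *		status = ice_update_pkg_no_lock(hw, bufs, count);
+ *		ice_release_change_lock(hw);
+ *	}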
+ */ +enum ice_status +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status; + + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; + + status = ice_update_pkg_no_lock(hw, bufs, count); + ice_release_change_lock(hw); return status; @@ -999,8 +1167,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) /* Save AQ status from download package */ hw->pkg_dwnld_status = hw->adminq.sq_last_status; if (status) { - ice_debug(hw, ICE_DBG_PKG, - "Pkg download failed: err %d off %d inf %d\n", + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); break; } @@ -1009,6 +1176,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) break; } + if (!status) { + status = ice_set_vlan_mode(hw); + if (status) + ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", + status); + } + ice_release_global_cfg_lock(hw); return status; @@ -1047,6 +1221,7 @@ static enum ice_status ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl; + enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", @@ -1064,8 +1239,12 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", LE32_TO_CPU(ice_buf_tbl->buf_count)); - return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - LE32_TO_CPU(ice_buf_tbl->buf_count)); + status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + LE32_TO_CPU(ice_buf_tbl->buf_count)); + + ice_post_pkg_dwnld_vlan_mode_cfg(hw); + + return status; } /** @@ -1078,35 +1257,45 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) static enum ice_status ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { - struct ice_global_metadata_seg *meta_seg; struct ice_generic_seg_hdr *seg_hdr; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (!pkg_hdr) return ICE_ERR_PARAM; - meta_seg = (struct ice_global_metadata_seg *) - ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr); - if (meta_seg) { - hw->pkg_ver = meta_seg->pkg_ver; - ice_memcpy(hw->pkg_name, meta_seg->pkg_name, - sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA); + hw->pkg_seg_id = SEGMENT_TYPE_ICE_E810; - ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", - meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor, - meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, - meta_seg->pkg_name); - } else { - ice_debug(hw, ICE_DBG_INIT, - "Did not find metadata segment in driver package\n"); - return ICE_ERR_CFG; - } + ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n", + hw->pkg_seg_id); - seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); if (seg_hdr) { - hw->ice_pkg_ver = seg_hdr->seg_format_ver; - ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_id, - sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA); + struct ice_meta_sect *meta; + struct ice_pkg_enum state; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + /* Get package information from the Metadata Section */ + meta = (struct ice_meta_sect *) + ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, + ICE_SID_METADATA); + if (!meta) { + ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); + return ICE_ERR_CFG; + } + + hw->pkg_ver = meta->ver; + ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name), + 
ICE_NONDMA_TO_NONDMA); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta->ver.major, meta->ver.minor, meta->ver.update, + meta->ver.draft, meta->name); + + hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; + ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id, + sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA); ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", seg_hdr->seg_format_ver.major, @@ -1115,8 +1304,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); } else { - ice_debug(hw, ICE_DBG_INIT, - "Did not find ice segment in driver package\n"); + ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); return ICE_ERR_CFG; } @@ -1138,7 +1326,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw) ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT - 1); + size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT); pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); if (!pkg_info) return ICE_ERR_NO_MEMORY; @@ -1197,7 +1385,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) u32 seg_count; u32 i; - if (len < sizeof(*pkg)) + if (len < ice_struct_size(pkg, seg_offset, 1)) return ICE_ERR_BUF_TOO_SHORT; if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || @@ -1212,7 +1400,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) return ICE_ERR_CFG; /* make sure segment array fits in package length */ - if (len < ice_struct_size(pkg, seg_offset, seg_count - 1)) + if (len < ice_struct_size(pkg, seg_offset, seg_count)) return ICE_ERR_BUF_TOO_SHORT; /* all segments must fit within length */ @@ -1313,7 +1501,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, } /* find ICE segment in given package */ - *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, ospkg); if (!*seg) { ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); @@ -1321,7 +1509,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, } /* Check if FW is compatible with the OS package */ - size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT - 1); + size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT); pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); if (!pkg) return ICE_ERR_NO_MEMORY; @@ -1339,8 +1527,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, (*seg)->hdr.seg_format_ver.minor > pkg->pkg_info[i].ver.minor) { status = ICE_ERR_FW_DDP_MISMATCH; - ice_debug(hw, ICE_DBG_INIT, - "OS package is not compatible with NVM.\n"); + ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); } /* done processing NVM package so break */ break; @@ -1350,6 +1537,88 @@ fw_ddp_compat_free_alloc: return status; } +/** + * ice_sw_fv_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the field vector entry to be returned + * @offset: ptr to variable that receives the offset in the field vector table + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * This function treats the given section as of type ice_sw_fv_section and + * enumerates offset field. "offset" is an index into the field vector table. 
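+ *
+ * For example, in a section whose base_offset is 256, requesting index 3
+ * yields *offset == 259, the absolute position of that field vector in
+ * the table (see the base_offset comment in the function body).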
+ */ +static void * +ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_sw_fv_section *fv_section = + (struct ice_sw_fv_section *)section; + + if (!section || sect_type != ICE_SID_FLD_VEC_SW) + return NULL; + if (index >= LE16_TO_CPU(fv_section->count)) + return NULL; + if (offset) + /* "index" passed in to this function is relative to a given + * 4k block. To get to the true index into the field vector + * table need to add the relative index to the base_offset + * field of this section + */ + *offset = LE16_TO_CPU(fv_section->base_offset) + index; + return fv_section->fv + index; +} + +/** + * ice_get_prof_index_max - get the max profile index for used profile + * @hw: pointer to the HW struct + * + * Calling this function will get the max profile index for used profile + * and store the index number in struct ice_switch_info *switch_info + * in hw for following use. + */ +static int ice_get_prof_index_max(struct ice_hw *hw) +{ + u16 prof_index = 0, j, max_prof_index = 0; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + bool flag = false; + struct ice_fv *fv; + u32 offset; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + + do { + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* in the profile that not be used, the prot_id is set to 0xff + * and the off is set to 0x1ff for all the field vectors. + */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id != ICE_PROT_INVALID || + fv->ew[j].off != ICE_FV_OFFSET_INVAL) + flag = true; + if (flag && prof_index > max_prof_index) + max_prof_index = prof_index; + + prof_index++; + flag = false; + } while (fv); + + hw->switch_info->max_used_prof_index = max_prof_index; + + return ICE_SUCCESS; +} + /** * ice_init_pkg - initialize/download package * @hw: pointer to the hardware structure @@ -1408,8 +1677,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) ice_init_pkg_hints(hw, seg); status = ice_download_pkg(hw, seg); if (status == ICE_ERR_AQ_NO_WORK) { - ice_debug(hw, ICE_DBG_INIT, - "package previously loaded - no work.\n"); + ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); status = ICE_SUCCESS; } @@ -1430,6 +1698,8 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) */ ice_init_pkg_regs(hw); ice_fill_blk_tbls(hw); + ice_fill_hw_ptype(hw); + ice_get_prof_index_max(hw); } else { ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", status); @@ -1503,39 +1773,7 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) buf = (struct ice_buf_hdr *)bld; buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr, section_entry)); - return bld; -} - -/** - * ice_sw_fv_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the field vector entry to be returned - * @offset: ptr to variable that receives the offset in the field vector table - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * This function treats the given section as of type ice_sw_fv_section and - * enumerates offset field. "offset" is an index into the field vector - * vector table. 
- */ -static void * -ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_sw_fv_section *fv_section = - (struct ice_sw_fv_section *)section; - - if (!section || sect_type != ICE_SID_FLD_VEC_SW) - return NULL; - if (index >= LE16_TO_CPU(fv_section->count)) - return NULL; - if (offset) - /* "index" passed in to this function is relative to a given - * 4k block. To get to the true index into the field vector - * table need to add the relative index to the base_offset - * field of this section - */ - *offset = LE16_TO_CPU(fv_section->base_offset) + index; - return fv_section->fv + index; + return bld; } /** @@ -1580,18 +1818,13 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, struct ice_seg *ice_seg; struct ice_fv *fv; - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - if (req_profs == ICE_PROF_ALL) { - u16 i; - - for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) - ice_set_bit(i, bm); + ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); return; } + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); - ice_seg = hw->seg; do { enum ice_prof_type prof_type; @@ -1748,7 +1981,7 @@ void ice_init_prof_result_bm(struct ice_hw *hw) * * Frees a package buffer */ -static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) { ice_free(hw, bld); } @@ -1788,7 +2021,7 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) bld->reserved_section_table_entries += count; data_end = LE16_TO_CPU(buf->data_end) + - (count * sizeof(buf->section_entry[0])); + FLEX_ARRAY_SIZE(buf, section_entry, count); buf->data_end = CPU_TO_LE16(data_end); return ICE_SUCCESS; @@ -1847,6 +2080,43 @@ ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) return NULL; } +/** + * ice_pkg_buf_alloc_single_section + * @hw: pointer to the HW structure + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * @section: returns pointer to the section + * + * Allocates a package buffer with a single section. + * Note: all package contents must be in Little Endian form. 
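+ *
+ * Illustrative (hypothetical) call for a single boost TCAM section:
+ *
+ *	struct ice_boost_tcam_section *sect;
+ *	struct ice_buf_build *bld;
+ *
+ *	bld = ice_pkg_buf_alloc_single_section(hw,
+ *					       ICE_SID_RXPARSER_BOOST_TCAM,
+ *					       ice_struct_size(sect, tcam, 1),
+ *					       (void **)&sect);
+ *	if (!bld)
+ *		return ICE_ERR_NO_MEMORY;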
+ */ +struct ice_buf_build * +ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, + void **section) +{ + struct ice_buf_build *buf; + + if (!section) + return NULL; + + buf = ice_pkg_buf_alloc(hw); + if (!buf) + return NULL; + + if (ice_pkg_buf_reserve_section(buf, 1)) + goto ice_pkg_buf_alloc_single_section_err; + + *section = ice_pkg_buf_alloc_section(buf, type, size); + if (!*section) + goto ice_pkg_buf_alloc_single_section_err; + + return buf; + +ice_pkg_buf_alloc_single_section_err: + ice_pkg_buf_free(hw, buf); + return NULL; +} + /** * ice_pkg_buf_get_active_sections * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) @@ -1874,7 +2144,7 @@ static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) * * Return a pointer to the buffer's header */ -static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) { if (!bld) return NULL; @@ -1883,7 +2153,7 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) } /** - * ice_tunnel_port_in_use + * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage * @hw: pointer to the HW structure * @port: port to search for * @index: optionally returns index @@ -1891,7 +2161,7 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) * Returns whether a port is already in use as a tunnel, and optionally its * index */ -bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) +static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index) { u16 i; @@ -1905,6 +2175,26 @@ bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) return false; } +/** + * ice_tunnel_port_in_use + * @hw: pointer to the HW structure + * @port: port to search for + * @index: optionally returns index + * + * Returns whether a port is already in use as a tunnel, and optionally its + * index + */ +bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) +{ + bool res; + + ice_acquire_lock(&hw->tnl_lock); + res = ice_tunnel_port_in_use_hlpr(hw, port, index); + ice_release_lock(&hw->tnl_lock); + + return res; +} + /** * ice_tunnel_get_type * @hw: pointer to the HW structure @@ -1916,15 +2206,21 @@ bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) bool ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type) { + bool res = false; u16 i; + ice_acquire_lock(&hw->tnl_lock); + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { *type = hw->tnl.tbl[i].type; - return true; + res = true; + break; } - return false; + ice_release_lock(&hw->tnl_lock); + + return res; } /** @@ -1953,7 +2249,7 @@ ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type, } /** - * ice_get_tunnel_port - retrieve an open tunnel port + * ice_get_open_tunnel_port - retrieve an open tunnel port * @hw: pointer to the HW structure * @type: tunnel type (TNL_ALL will return any open port) * @port: returns open port @@ -1962,16 +2258,109 @@ bool ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, u16 *port) { + bool res = false; u16 i; + ice_acquire_lock(&hw->tnl_lock); + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && (type == TNL_ALL || hw->tnl.tbl[i].type == type)) { *port = hw->tnl.tbl[i].port; - return true; + res = true; + break; } - return false; + ice_release_lock(&hw->tnl_lock); + + return res; +} + +/** + * 
ice_upd_dvm_boost_entry
+ * @hw: pointer to the HW structure
+ * @entry: pointer to double VLAN boost entry info
+ */
+static enum ice_status
+ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
+{
+	struct ice_boost_tcam_section *sect_rx, *sect_tx;
+	enum ice_status status = ICE_ERR_MAX_LIMIT;
+	struct ice_buf_build *bld;
+	u8 val, dc, nm;
+
+	bld = ice_pkg_buf_alloc(hw);
+	if (!bld)
+		return ICE_ERR_NO_MEMORY;
+
+	/* allocate 2 sections, one for Rx parser, one for Tx parser */
+	if (ice_pkg_buf_reserve_section(bld, 2))
+		goto ice_upd_dvm_boost_entry_err;
+
+	sect_rx = (struct ice_boost_tcam_section *)
+		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+					  ice_struct_size(sect_rx, tcam, 1));
+	if (!sect_rx)
+		goto ice_upd_dvm_boost_entry_err;
+	sect_rx->count = CPU_TO_LE16(1);
+
+	sect_tx = (struct ice_boost_tcam_section *)
+		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+					  ice_struct_size(sect_tx, tcam, 1));
+	if (!sect_tx)
+		goto ice_upd_dvm_boost_entry_err;
+	sect_tx->count = CPU_TO_LE16(1);
+
+	/* copy original boost entry to update package buffer */
+	ice_memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam),
+		   ICE_NONDMA_TO_NONDMA);
+
+	/* re-write the don't care and never match bits accordingly */
+	if (entry->enable) {
+		/* all bits are don't care */
+		val = 0x00;
+		dc = 0xFF;
+		nm = 0x00;
+	} else {
+		/* disable, one never match bit, the rest are don't care */
+		val = 0x00;
+		dc = 0xF7;
+		nm = 0x08;
+	}
+
+	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+		    &val, NULL, &dc, &nm, 0, sizeof(u8));
+
+	/* exact copy of entry to Tx section entry */
+	ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
+		   ICE_NONDMA_TO_NONDMA);
+
+	status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
+
+ice_upd_dvm_boost_entry_err:
+	ice_pkg_buf_free(hw, bld);
+
+	return status;
+}
+
+/**
+ * ice_set_dvm_boost_entries
+ * @hw: pointer to the HW structure
+ *
+ * Enable double VLAN mode by updating the appropriate boost TCAM entries.
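+ *
+ * The entries walked here are the hints collected by ice_init_pkg_hints():
+ * labels prefixed with ICE_DVM_PRE are written with an all-don't-care key
+ * (enabled), while labels prefixed with ICE_SVM_PRE receive one never-match
+ * bit (disabled); see ice_upd_dvm_boost_entry() above.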
+ */ +enum ice_status ice_set_dvm_boost_entries(struct ice_hw *hw) +{ + enum ice_status status; + u16 i; + + for (i = 0; i < hw->dvm_upd.count; i++) { + status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]); + if (status) + return status; + } + + return ICE_SUCCESS; } /** @@ -1992,15 +2381,24 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) struct ice_buf_build *bld; u16 index; - if (ice_tunnel_port_in_use(hw, port, NULL)) - return ICE_ERR_ALREADY_EXISTS; + ice_acquire_lock(&hw->tnl_lock); - if (!ice_find_free_tunnel_entry(hw, type, &index)) - return ICE_ERR_OUT_OF_RANGE; + if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) { + hw->tnl.tbl[index].ref++; + status = ICE_SUCCESS; + goto ice_create_tunnel_end; + } + + if (!ice_find_free_tunnel_entry(hw, type, &index)) { + status = ICE_ERR_OUT_OF_RANGE; + goto ice_create_tunnel_end; + } bld = ice_pkg_buf_alloc(hw); - if (!bld) - return ICE_ERR_NO_MEMORY; + if (!bld) { + status = ICE_ERR_NO_MEMORY; + goto ice_create_tunnel_end; + } /* allocate 2 sections, one for Rx parser, one for Tx parser */ if (ice_pkg_buf_reserve_section(bld, 2)) @@ -2008,14 +2406,14 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) sect_rx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, - sizeof(*sect_rx)); + ice_struct_size(sect_rx, tcam, 1)); if (!sect_rx) goto ice_create_tunnel_err; sect_rx->count = CPU_TO_LE16(1); sect_tx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, - sizeof(*sect_tx)); + ice_struct_size(sect_tx, tcam, 1)); if (!sect_tx) goto ice_create_tunnel_err; sect_tx->count = CPU_TO_LE16(1); @@ -2040,11 +2438,15 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) if (!status) { hw->tnl.tbl[index].port = port; hw->tnl.tbl[index].in_use = true; + hw->tnl.tbl[index].ref = 1; } ice_create_tunnel_err: ice_pkg_buf_free(hw, bld); +ice_create_tunnel_end: + ice_release_lock(&hw->tnl_lock); + return status; } @@ -2064,8 +2466,18 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; u16 count = 0; + u16 index; u16 size; - u16 i; + u16 i, j; + + ice_acquire_lock(&hw->tnl_lock); + + if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index)) + if (hw->tnl.tbl[index].ref > 1) { + hw->tnl.tbl[index].ref--; + status = ICE_SUCCESS; + goto ice_destroy_tunnel_end; + } /* determine count */ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) @@ -2073,15 +2485,19 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) (all || hw->tnl.tbl[i].port == port)) count++; - if (!count) - return ICE_ERR_PARAM; + if (!count) { + status = ICE_ERR_PARAM; + goto ice_destroy_tunnel_end; + } /* size of section - there is at least one entry */ - size = ice_struct_size(sect_rx, tcam, count - 1); + size = ice_struct_size(sect_rx, tcam, count); bld = ice_pkg_buf_alloc(hw); - if (!bld) - return ICE_ERR_NO_MEMORY; + if (!bld) { + status = ICE_ERR_NO_MEMORY; + goto ice_destroy_tunnel_end; + } /* allocate 2 sections, one for Rx parser, one for Tx parser */ if (ice_pkg_buf_reserve_section(bld, 2)) @@ -2092,30 +2508,31 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) size); if (!sect_rx) goto ice_destroy_tunnel_err; - sect_rx->count = CPU_TO_LE16(1); + sect_rx->count = CPU_TO_LE16(count); sect_tx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, 
ICE_SID_TXPARSER_BOOST_TCAM, size); if (!sect_tx) goto ice_destroy_tunnel_err; - sect_tx->count = CPU_TO_LE16(1); + sect_tx->count = CPU_TO_LE16(count); /* copy original boost entry to update package buffer, one copy to Rx * section, another copy to the Tx section */ - for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && (all || hw->tnl.tbl[i].port == port)) { - ice_memcpy(sect_rx->tcam + i, + ice_memcpy(sect_rx->tcam + j, hw->tnl.tbl[i].boost_entry, sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA); - ice_memcpy(sect_tx->tcam + i, + ice_memcpy(sect_tx->tcam + j, hw->tnl.tbl[i].boost_entry, sizeof(*sect_tx->tcam), ICE_NONDMA_TO_NONDMA); hw->tnl.tbl[i].marked = true; + j++; } status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); @@ -2123,6 +2540,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].marked) { + hw->tnl.tbl[i].ref = 0; hw->tnl.tbl[i].port = 0; hw->tnl.tbl[i].in_use = false; hw->tnl.tbl[i].marked = false; @@ -2131,6 +2549,9 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) ice_destroy_tunnel_err: ice_pkg_buf_free(hw, bld); +ice_destroy_tunnel_end: + ice_release_lock(&hw->tnl_lock); + return status; } @@ -2365,12 +2786,10 @@ ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2) u16 count = 0; /* compare counts */ - LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) { + LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) count++; - } - LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) { + LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) chk_count++; - } if (!count || count != chk_count) return false; @@ -2479,13 +2898,12 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2; u16 i; - for (i = 0; i < xlt2->count; i++) { + for (i = 0; i < xlt2->count; i++) if (xlt2->vsig_tbl[i].in_use && ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) { *vsig = ICE_VSIG_VALUE(i, hw->pf_id); return ICE_SUCCESS; } - } return ICE_ERR_DOES_NOT_EXIST; } @@ -2675,7 +3093,7 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) * @blk: HW block * @prof: profile to check * @idx: profile index to check - * @masks: masks to match + * @mask: mask to match */ static bool ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx, @@ -2748,6 +3166,12 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, struct ice_es *es = &hw->blk[blk].es; u8 i; + /* For FD and RSS, we don't want to re-use an existed profile with the + * same field vector and mask. This will cause rule interference. + */ + if (blk == ICE_BLK_FD || blk == ICE_BLK_RSS) + return ICE_ERR_DOES_NOT_EXIST; + for (i = 0; i < (u8)es->count; i++) { u16 off = i * es->fvw; @@ -2827,20 +3251,22 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) * ice_alloc_tcam_ent - allocate hardware TCAM entry * @hw: pointer to the HW struct * @blk: the block to allocate the TCAM for + * @btm: true to allocate from bottom of table, false to allocate from top * @tcam_idx: pointer to variable to receive the TCAM entry * * This function allocates a new entry in a Profile ID TCAM for a specific * block. 
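+ *
+ * Later call sites in this patch allocate bottom-up when the attribute
+ * mask is empty so that attribute-specific entries, allocated top-down,
+ * keep the higher TCAM priority, for example:
+ *
+ *	status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
+ *				    &tcam_idx);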
*/ static enum ice_status -ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx) +ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, + u16 *tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; - return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx); + return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); } /** @@ -3011,7 +3437,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk) per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs; hw->blk[blk].masks.count = per_pf; - hw->blk[blk].masks.first = hw->pf_id * per_pf; + hw->blk[blk].masks.first = hw->logical_pf_id * per_pf; ice_memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM); @@ -3195,7 +3621,6 @@ static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk) /** * ice_shutdown_all_prof_masks - releases all locks for masking * @hw: pointer to the HW struct - * @blk: hardware block * * This should be called before unloading the driver */ @@ -3210,12 +3635,11 @@ void ice_shutdown_all_prof_masks(struct ice_hw *hw) * @hw: pointer to the HW struct * @blk: hardware block * @prof_id: profile ID - * @es: field vector * @masks: masks */ static enum ice_status ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, - struct ice_fv_word *es, u16 *masks) + u16 *masks) { bool err = false; u32 ena_mask = 0; @@ -3600,6 +4024,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) LIST_DEL(&p->l_entry); if (p->acts) ice_free(hw, p->acts); + + ice_destroy_lock(&p->entries_lock); ice_free(hw, p); } ice_release_lock(&hw->fl_profs_locks[blk_idx]); @@ -3728,7 +4154,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) prof_redir->count * sizeof(*prof_redir->t), ICE_NONDMA_MEM); - ice_memset(es->t, 0, es->count * sizeof(*es->t), + ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw, ICE_NONDMA_MEM); ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count), ICE_NONDMA_MEM); @@ -3840,11 +4266,19 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) es->ref_count = (u16 *) ice_calloc(hw, es->count, sizeof(*es->ref_count)); + if (!es->ref_count) + goto err; + es->written = (u8 *) ice_calloc(hw, es->count, sizeof(*es->written)); + + if (!es->written) + goto err; + es->mask_ena = (u32 *) ice_calloc(hw, es->count, sizeof(*es->mask_ena)); - if (!es->ref_count) + + if (!es->mask_ena) goto err; } return ICE_SUCCESS; @@ -3917,7 +4351,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, * @prof_id: profile ID * @ptg: packet type group (PTG) portion of key * @vsig: VSIG portion of key - * @cdid: CDID: portion of key + * @cdid: CDID portion of key * @flags: flag portion of key * @vl_msk: valid mask * @dc_msk: don't care mask @@ -3984,13 +4418,11 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) struct ice_vsig_prof *ent; LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, - ice_vsig_prof, list) { + ice_vsig_prof, list) if (ent->profile_cookie == hdl) return true; - } - ice_debug(hw, ICE_DBG_INIT, - "Characteristic list for VSI group %d not found.\n", + ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n", vsig); return false; } @@ -4009,7 +4441,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word); struct ice_chs_chg *tmp; - LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { + LIST_FOR_EACH_ENTRY(tmp, chgs, 
ice_chs_chg, list_entry) if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) { u16 off = tmp->prof_id * hw->blk[blk].es.fvw; struct ice_pkg_es *p; @@ -4017,7 +4449,9 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, id = ice_sect_id(blk, ICE_VEC_TBL); p = (struct ice_pkg_es *) - ice_pkg_buf_alloc_section(bld, id, sizeof(*p) + + ice_pkg_buf_alloc_section(bld, id, + ice_struct_size(p, es, + 1) + vec_size - sizeof(p->es[0])); @@ -4030,7 +4464,6 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size, ICE_NONDMA_TO_NONDMA); } - } return ICE_SUCCESS; } @@ -4048,14 +4481,17 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, { struct ice_chs_chg *tmp; - LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { + LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) { struct ice_prof_id_section *p; u32 id; id = ice_sect_id(blk, ICE_PROF_TCAM); p = (struct ice_prof_id_section *) - ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); + ice_pkg_buf_alloc_section(bld, id, + ice_struct_size(p, + entry, + 1)); if (!p) return ICE_ERR_MAX_LIMIT; @@ -4069,7 +4505,6 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, sizeof(hw->blk[blk].prof.t->key), ICE_NONDMA_TO_NONDMA); } - } return ICE_SUCCESS; } @@ -4086,14 +4521,17 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, { struct ice_chs_chg *tmp; - LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { + LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) { struct ice_xlt1_section *p; u32 id; id = ice_sect_id(blk, ICE_XLT1); p = (struct ice_xlt1_section *) - ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); + ice_pkg_buf_alloc_section(bld, id, + ice_struct_size(p, + value, + 1)); if (!p) return ICE_ERR_MAX_LIMIT; @@ -4102,7 +4540,6 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, p->offset = CPU_TO_LE16(tmp->ptype); p->value[0] = tmp->ptg; } - } return ICE_SUCCESS; } @@ -4129,7 +4566,10 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, case ICE_VSIG_REM: id = ice_sect_id(blk, ICE_XLT2); p = (struct ice_xlt2_section *) - ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); + ice_pkg_buf_alloc_section(bld, id, + ice_struct_size(p, + value, + 1)); if (!p) return ICE_ERR_MAX_LIMIT; @@ -4277,12 +4717,18 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { { ICE_PROT_IPV4_IL, 2, 12 }, { ICE_PROT_IPV4_IL, 2, 16 }, + { ICE_PROT_IPV4_IL_IL, 2, 12 }, + { ICE_PROT_IPV4_IL_IL, 2, 16 }, + { ICE_PROT_IPV6_OF_OR_S, 8, 8 }, { ICE_PROT_IPV6_OF_OR_S, 8, 24 }, { ICE_PROT_IPV6_IL, 8, 8 }, { ICE_PROT_IPV6_IL, 8, 24 }, + { ICE_PROT_IPV6_IL_IL, 8, 8 }, + { ICE_PROT_IPV6_IL_IL, 8, 24 }, + { ICE_PROT_TCP_IL, 1, 0 }, { ICE_PROT_TCP_IL, 1, 2 }, @@ -4333,13 +4779,12 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) ICE_PROT_INVALID) first_free = i - 1; - for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) { + for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) if (es[i].prot_id == ice_fd_pairs[j].prot_id && es[i].off == ice_fd_pairs[j].off) { ice_set_bit(j, pair_list); pair_start[j] = i; } - } } orig_free = first_free; @@ -4396,7 +4841,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) } /* check for a swap location */ - for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) { + for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) if (es[si].prot_id == ice_fd_pairs[j].prot_id && es[si].off == ice_fd_pairs[j].off) 
{ u8 idx; @@ -4412,7 +4857,6 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) break; } - } si -= indexes_used; } @@ -4559,7 +5003,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], if (status) goto err_ice_add_prof; } - status = ice_update_prof_masking(hw, blk, prof_id, es, masks); + status = ice_update_prof_masking(hw, blk, prof_id, masks); if (status) goto err_ice_add_prof; @@ -4589,50 +5033,42 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], byte++; continue; } + /* Examine 8 bits per byte */ - for (bit = 0; bit < 8; bit++) { - if (ptypes[byte] & BIT(bit)) { - u16 ptype; - u8 ptg; - u8 m; + ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte], + BITS_PER_BYTE) { + u16 ptype; + u8 ptg; - ptype = byte * BITS_PER_BYTE + bit; + ptype = byte * BITS_PER_BYTE + bit; - /* The package should place all ptypes in a - * non-zero PTG, so the following call should - * never fail. - */ - if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) - continue; + /* The package should place all ptypes in a non-zero + * PTG, so the following call should never fail. + */ + if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) + continue; - /* If PTG is already added, skip and continue */ - if (ice_is_bit_set(ptgs_used, ptg)) - continue; + /* If PTG is already added, skip and continue */ + if (ice_is_bit_set(ptgs_used, ptg)) + continue; - ice_set_bit(ptg, ptgs_used); - /* Check to see there are any attributes for - * this ptype, and add them if found. + ice_set_bit(ptg, ptgs_used); + /* Check to see there are any attributes for this + * ptype, and add them if found. + */ + status = ice_add_prof_attrib(prof, ptg, ptype, attr, + attr_cnt); + if (status == ICE_ERR_MAX_LIMIT) + break; + if (status) { + /* This is simple a ptype/PTG with no + * attribute */ - status = ice_add_prof_attrib(prof, ptg, ptype, - attr, attr_cnt); - if (status == ICE_ERR_MAX_LIMIT) - break; - if (status) { - /* This is simple a ptype/PTG with no - * attribute - */ - prof->ptg[prof->ptg_cnt] = ptg; - prof->attr[prof->ptg_cnt].flags = 0; - prof->attr[prof->ptg_cnt].mask = 0; - - if (++prof->ptg_cnt >= - ICE_MAX_PTG_PER_PROFILE) - break; - } + prof->ptg[prof->ptg_cnt] = ptg; + prof->attr[prof->ptg_cnt].flags = 0; + prof->attr[prof->ptg_cnt].mask = 0; - /* nothing left in byte, then exit */ - m = ~(u8)((1 << (bit + 1)) - 1); - if (!(ptypes[byte] & m)) + if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) break; } } @@ -4650,47 +5086,25 @@ err_ice_add_prof: } /** - * ice_search_prof_id_low - Search for a profile tracking ID low level + * ice_search_prof_id - Search for a profile tracking ID * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * - * This will search for a profile tracking ID which was previously added. This - * version assumes that the caller has already acquired the prof map lock. + * This will search for a profile tracking ID which was previously added. + * The profile map lock should be held before calling this function. 
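+ *
+ * The converted call sites in this patch follow the pattern:
+ *
+ *	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+ *	map = ice_search_prof_id(hw, blk, hdl);
+ *	if (map)
+ *		(use map while the lock is held)
+ *	ice_release_lock(&hw->blk[blk].es.prof_map_lock);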
*/ -static struct ice_prof_map * -ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) +struct ice_prof_map * +ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *entry = NULL; struct ice_prof_map *map; - LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, - list) { + LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list) if (map->profile_cookie == id) { entry = map; break; } - } - - return entry; -} - -/** - * ice_search_prof_id - Search for a profile tracking ID - * @hw: pointer to the HW struct - * @blk: hardware block - * @id: profile tracking ID - * - * This will search for a profile tracking ID which was previously added. - */ -struct ice_prof_map * -ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) -{ - struct ice_prof_map *entry; - - ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); - entry = ice_search_prof_id_low(hw, blk, id); - ice_release_lock(&hw->blk[blk].es.prof_map_lock); return entry; } @@ -4708,9 +5122,8 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) struct ice_vsig_prof *p; LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, - ice_vsig_prof, list) { + ice_vsig_prof, list) count++; - } return count; } @@ -4755,7 +5168,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, enum ice_status status; u16 i; - for (i = 0; i < prof->tcam_count; i++) { + for (i = 0; i < prof->tcam_count; i++) if (prof->tcam[i].in_use) { prof->tcam[i].in_use = false; status = ice_rel_tcam_idx(hw, blk, @@ -4763,7 +5176,6 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, if (status) return ICE_ERR_HW_TABLE; } - } return ICE_SUCCESS; } @@ -4801,7 +5213,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, /* If the VSIG has at least 1 VSI then iterate through the list * and remove the VSIs before deleting the group. 
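+	 *
+	 * Each node's next_vsi pointer is saved to a temporary before the
+	 * node is removed, which keeps the traversal safe as entries are
+	 * cleared from the VSIG.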
*/ - if (vsi_cur) { + if (vsi_cur) do { struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; struct ice_chs_chg *p; @@ -4819,7 +5231,6 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, vsi_cur = tmp; } while (vsi_cur); - } return ice_vsig_free(hw, blk, vsig); } @@ -4842,7 +5253,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, - ice_vsig_prof, list) { + ice_vsig_prof, list) if (p->profile_cookie == hdl) { if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) /* this is the last profile, remove the VSIG */ @@ -4855,7 +5266,6 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, } return status; } - } return ICE_ERR_DOES_NOT_EXIST; } @@ -4876,7 +5286,7 @@ ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) INIT_LIST_HEAD(&chg); - for (i = 1; i < ICE_MAX_VSIGS; i++) { + for (i = 1; i < ICE_MAX_VSIGS; i++) if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) { if (ice_has_prof_vsig(hw, blk, i, id)) { status = ice_rem_prof_id_vsig(hw, blk, i, id, @@ -4885,7 +5295,6 @@ ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) goto err_ice_rem_flow_all; } } - } status = ice_upd_prof_hw(hw, blk, &chg); @@ -4915,7 +5324,7 @@ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); - pmap = ice_search_prof_id_low(hw, blk, id); + pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_rem_prof; @@ -4948,21 +5357,27 @@ static enum ice_status ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct LIST_HEAD_TYPE *chg) { + enum ice_status status = ICE_SUCCESS; struct ice_prof_map *map; struct ice_chs_chg *p; u16 i; + ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_get_prof; + } - for (i = 0; i < map->ptg_cnt; i++) { + for (i = 0; i < map->ptg_cnt; i++) if (!hw->blk[blk].es.written[map->prof_id]) { /* add ES to change list */ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); - if (!p) + if (!p) { + status = ICE_ERR_NO_MEMORY; goto err_ice_get_prof; + } p->type = ICE_PTG_ES_ADD; p->ptype = 0; @@ -4977,13 +5392,11 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, LIST_ADD(&p->list_entry, chg); } - } - - return ICE_SUCCESS; err_ice_get_prof: + ice_release_lock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ - return ICE_ERR_NO_MEMORY; + return status; } /** @@ -5037,17 +5450,23 @@ static enum ice_status ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct LIST_HEAD_TYPE *lst, u64 hdl) { + enum ice_status status = ICE_SUCCESS; struct ice_prof_map *map; struct ice_vsig_prof *p; u16 i; + ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_add_prof_to_lst; + } p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p)); - if (!p) - return ICE_ERR_NO_MEMORY; + if (!p) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_prof_to_lst; + } p->profile_cookie = map->profile_cookie; p->prof_id = map->prof_id; @@ -5062,7 +5481,9 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, LIST_ADD(&p->list, lst); - return ICE_SUCCESS; 
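+	/* success path falls through: status still holds ICE_SUCCESS, and
+	 * the label below releases the profile map lock before returning
+	 */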
+err_ice_add_prof_to_lst: + ice_release_lock(&hw->blk[blk].es.prof_map_lock); + return status; } /** @@ -5129,12 +5550,11 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg) { struct ice_chs_chg *pos, *tmp; - LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) { + LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) { LIST_DEL(&tmp->list_entry); ice_free(hw, tmp); } - } } /** @@ -5175,7 +5595,12 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, } /* for re-enabling, reallocate a TCAM */ - status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx); + /* for entries with empty attribute masks, allocate entry from + * the bottom of the TCAM table; otherwise, allocate from the + * top of the table in order to give it higher priority + */ + status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0, + &tcam->tcam_idx); if (status) return status; @@ -5315,8 +5740,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, if (attr_used_cnt < ICE_MAX_PTG_ATTRS) attr_used[attr_used_cnt++] = &t->tcam[i]; else - ice_debug(hw, ICE_DBG_INIT, - "Warn: ICE_MAX_PTG_ATTRS exceeded\n"); + ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n"); } } @@ -5342,16 +5766,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; + enum ice_status status = ICE_SUCCESS; struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; u16 vsig_idx, i; - /* Get the details on the profile specified by the handle ID */ - map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; - /* Error, if this VSIG already has this profile */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) return ICE_ERR_ALREADY_EXISTS; @@ -5361,22 +5781,36 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, if (!t) return ICE_ERR_NO_MEMORY; + ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); + /* Get the details on the profile specified by the handle ID */ + map = ice_search_prof_id(hw, blk, hdl); + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_add_prof_id_vsig; + } + t->profile_cookie = map->profile_cookie; t->prof_id = map->prof_id; t->tcam_count = map->ptg_cnt; /* create TCAM entries */ for (i = 0; i < map->ptg_cnt; i++) { - enum ice_status status; u16 tcam_idx; /* add TCAM to change list */ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); - if (!p) + if (!p) { + status = ICE_ERR_NO_MEMORY; goto err_ice_add_prof_id_vsig; + } /* allocate the TCAM entry index */ - status = ice_alloc_tcam_ent(hw, blk, &tcam_idx); + /* for entries with empty attribute masks, allocate entry from + * the bottom of the TCAM table; otherwise, allocate from the + * top of the table in order to give it higher priority + */ + status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0, + &tcam_idx); if (status) { ice_free(hw, p); goto err_ice_add_prof_id_vsig; @@ -5422,12 +5856,14 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, LIST_ADD(&t->list, &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); - return ICE_SUCCESS; + ice_release_lock(&hw->blk[blk].es.prof_map_lock); + return status; err_ice_add_prof_id_vsig: + ice_release_lock(&hw->blk[blk].es.prof_map_lock); /* let caller clean 
up the change list */ ice_free(hw, t); - return ICE_ERR_NO_MEMORY; + return status; } /** @@ -5747,13 +6183,12 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl) { struct ice_vsig_prof *ent, *tmp; - LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) { + LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) if (ent->profile_cookie == hdl) { LIST_DEL(&ent->list); ice_free(hw, ent); return ICE_SUCCESS; } - } return ICE_ERR_DOES_NOT_EXIST; } @@ -5804,7 +6239,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) if (last_profile) { /* If there are no profiles left for this VSIG, - * then simply remove the the VSIG. + * then simply remove the VSIG. */ status = ice_rem_vsig(hw, blk, vsig, &chg); if (status)