entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx);
- i = ice_find_first_bit(scen->act_mem_bitmap,
- ICE_AQC_MAX_ACTION_MEMORIES);
- while (i < ICE_AQC_MAX_ACTION_MEMORIES) {
+ ice_for_each_set_bit(i, scen->act_mem_bitmap,
+ ICE_AQC_MAX_ACTION_MEMORIES) {
struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
if (actx_idx >= acts_cnt)
}
actx_idx++;
}
-
- i = ice_find_next_bit(scen->act_mem_bitmap,
- ICE_AQC_MAX_ACTION_MEMORIES, i + 1);
}
if (!status && actx_idx < acts_cnt)
}
ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
- i = ice_find_first_bit(scen->act_mem_bitmap,
- ICE_AQC_MAX_ACTION_MEMORIES);
- while (i < ICE_AQC_MAX_ACTION_MEMORIES) {
+
+ ice_for_each_set_bit(i, scen->act_mem_bitmap,
+ ICE_AQC_MAX_ACTION_MEMORIES) {
struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
if (mem->member_of_tcam >= entry_tcam &&
"program actpair failed.status: %d\n",
status);
}
-
- i = ice_find_next_bit(scen->act_mem_bitmap,
- ICE_AQC_MAX_ACTION_MEMORIES, i + 1);
}
ice_acl_scen_free_entry_idx(scen, entry_idx);
return ice_find_next_bit(bitmap, size, 0);
}
+/**
+ * ice_for_each_set_bit - iterate over each set bit in a bitmap
+ * @_bitpos: iterator variable; assigned the index of each set bit in turn
+ * @_addr: pointer to the ice_bitmap_t bitmap to scan
+ * @_maxlen: number of valid bits in the bitmap; loop ends when no set bit
+ *           below this index remains
+ *
+ * Expands to a for-loop header (follow with a statement or block). Uses
+ * ice_find_first_bit to seed the iterator and ice_find_next_bit to
+ * advance, so already-examined bits are never revisited.
+ *
+ * NOTE(review): _addr and _maxlen are evaluated on every iteration —
+ * pass side-effect-free expressions. Clearing bits at or below _bitpos
+ * inside the loop body is safe; setting them will not revisit them.
+ */
+#define ice_for_each_set_bit(_bitpos, _addr, _maxlen) \
+ for ((_bitpos) = ice_find_first_bit((_addr), (_maxlen)); \
+ (_bitpos) < (_maxlen); \
+ (_bitpos) = ice_find_next_bit((_addr), (_maxlen), (_bitpos) + 1))
+
/**
* ice_is_any_bit_set - Return true of any bit in the bitmap is set
* @bitmap: the bitmap to check
byte++;
continue;
}
+
/* Examine 8 bits per byte */
- for (bit = 0; bit < 8; bit++) {
- if (ptypes[byte] & BIT(bit)) {
- u16 ptype;
- u8 ptg;
- u8 m;
+ ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
+ BITS_PER_BYTE) {
+ u16 ptype;
+ u8 ptg;
- ptype = byte * BITS_PER_BYTE + bit;
+ ptype = byte * BITS_PER_BYTE + bit;
- /* The package should place all ptypes in a
- * non-zero PTG, so the following call should
- * never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+ continue;
- /* If PTG is already added, skip and continue */
- if (ice_is_bit_set(ptgs_used, ptg))
- continue;
+ /* If PTG is already added, skip and continue */
+ if (ice_is_bit_set(ptgs_used, ptg))
+ continue;
- ice_set_bit(ptg, ptgs_used);
- /* Check to see there are any attributes for
- * this ptype, and add them if found.
+ ice_set_bit(ptg, ptgs_used);
+ /* Check to see there are any attributes for this
+ * ptype, and add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype, attr,
+ attr_cnt);
+ if (status == ICE_ERR_MAX_LIMIT)
+ break;
+ if (status) {
+ /* This is simple a ptype/PTG with no
+ * attribute
*/
- status = ice_add_prof_attrib(prof, ptg, ptype,
- attr, attr_cnt);
- if (status == ICE_ERR_MAX_LIMIT)
- break;
- if (status) {
- /* This is simple a ptype/PTG with no
- * attribute
- */
- prof->ptg[prof->ptg_cnt] = ptg;
- prof->attr[prof->ptg_cnt].flags = 0;
- prof->attr[prof->ptg_cnt].mask = 0;
-
- if (++prof->ptg_cnt >=
- ICE_MAX_PTG_PER_PROFILE)
- break;
- }
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
- /* nothing left in byte, then exit */
- m = ~(u8)((1 << (bit + 1)) - 1);
- if (!(ptypes[byte] & m))
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
}
}
u64 match = params->prof->segs[i].match;
enum ice_flow_field j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- const u64 bit = BIT_ULL(j);
-
- if (match & bit) {
- status = ice_flow_xtract_fld(hw, params, i, j,
- match);
- if (status)
- return status;
- match &= ~bit;
- }
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ status = ice_flow_xtract_fld(hw, params, i, j, match);
+ if (status)
+ return status;
+ ice_clear_bit(j, (ice_bitmap_t *)&match);
}
/* Process raw matching bytes */
for (i = 0; i < params->prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
- u64 match = seg->match;
u8 j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- struct ice_flow_fld_info *fld;
- const u64 bit = BIT_ULL(j);
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ struct ice_flow_fld_info *fld = &seg->fields[j];
- if (!(match & bit))
- continue;
-
- fld = &seg->fields[j];
fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
fld->entry.val = index;
index += fld->entry.last;
}
-
- match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
for (i = 0; i < prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &prof->segs[i];
- u64 match = seg->match;
u16 j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- const u64 bit = BIT_ULL(j);
-
- if (!(match & bit))
- continue;
-
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
+ ICE_FLOW_FIELD_IDX_MAX) {
info = &seg->fields[j];
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
buf.word_selection[info->entry.val] =
- info->xtrct.idx;
+ info->xtrct.idx;
else
ice_flow_acl_set_xtrct_seq_fld(&buf,
info);
-
- match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
for (i = 0; i < prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &prof->segs[i];
- u64 match = seg->match;
- u16 j;
-
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- struct ice_flow_fld_info *info;
- const u64 bit = BIT_ULL(j);
-
- if (!(match & bit))
- continue;
+ u8 j;
- info = &seg->fields[j];
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ struct ice_flow_fld_info *info = &seg->fields[j];
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
ice_flow_acl_frmt_entry_range(j, info,
else
ice_flow_acl_frmt_entry_fld(j, info, buf,
dontcare, data);
-
- match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
{
- u64 val = hash_fields;
+ u64 val;
u8 i;
- for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
- u64 bit = BIT_ULL(i);
+ ice_for_each_set_bit(i, (ice_bitmap_t *)&hash_fields,
+ ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
- if (val & bit) {
- ice_flow_set_fld(segs, (enum ice_flow_field)i,
- ICE_FLOW_FLD_OFF_INVAL,
- ICE_FLOW_FLD_OFF_INVAL,
- ICE_FLOW_FLD_OFF_INVAL, false);
- val &= ~bit;
- }
- }
ICE_FLOW_SET_HDRS(segs, flow_hdr);
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
continue;
ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
ICE_MAX_NUM_RECIPES);
- for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
- if (ice_is_bit_set(r_bitmap, j))
- ice_set_bit(i, recipe_to_profile[j]);
+ ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
+ ice_set_bit(i, recipe_to_profile[j]);
}
}
* the set of recipes that our recipe may collide with. Also, determine
* what possible result indexes are usable given this set of profiles.
*/
- bit = 0;
- while (ICE_MAX_NUM_PROFILES >
- (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
+ ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
ICE_MAX_NUM_RECIPES);
ice_and_bitmap(possible_idx, possible_idx,
hw->switch_info->prof_res_bm[bit],
ICE_MAX_FV_WORDS);
- bit++;
}
/* For each recipe that our new recipe may collide with, determine
* which indexes have been used.
*/
- for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
- if (ice_is_bit_set(recipes, bit)) {
- ice_or_bitmap(used_idx, used_idx,
- hw->switch_info->recp_list[bit].res_idxs,
- ICE_MAX_FV_WORDS);
- }
+ ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
+ ice_or_bitmap(used_idx, used_idx,
+ hw->switch_info->recp_list[bit].res_idxs,
+ ICE_MAX_FV_WORDS);
ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
if (LIST_EMPTY(&rm->fv_list)) {
u16 j;
- for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
- if (ice_is_bit_set(fv_bitmap, j)) {
- struct ice_sw_fv_list_entry *fvl;
-
- fvl = (struct ice_sw_fv_list_entry *)
- ice_malloc(hw, sizeof(*fvl));
- if (!fvl)
- goto err_unroll;
- fvl->fv_ptr = NULL;
- fvl->profile_id = j;
- LIST_ADD(&fvl->list_entry, &rm->fv_list);
- }
+ ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
+ struct ice_sw_fv_list_entry *fvl;
+
+ fvl = (struct ice_sw_fv_list_entry *)
+ ice_malloc(hw, sizeof(*fvl));
+ if (!fvl)
+ goto err_unroll;
+ fvl->fv_ptr = NULL;
+ fvl->profile_id = j;
+ LIST_ADD(&fvl->list_entry, &rm->fv_list);
+ }
}
/* get bitmap of all profiles the recipe will be associated with */
ICE_MAX_NUM_RECIPES);
/* Update recipe to profile bitmap array */
- for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
- if (ice_is_bit_set(r_bitmap, j))
- ice_set_bit((u16)fvit->profile_id,
- recipe_to_profile[j]);
+ ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
+ ice_set_bit((u16)fvit->profile_id,
+ recipe_to_profile[j]);
}
*rid = rm->root_rid;
LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
list_entry) {
struct ice_fltr_list_entry f_entry;
+ u16 vsi_handle;
f_entry.fltr_info = itr->fltr_info;
if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
}
/* Add a filter per VSI separately */
- while (1) {
- u16 vsi_handle;
-
- vsi_handle =
- ice_find_first_bit(itr->vsi_list_info->vsi_map,
- ICE_MAX_VSI);
+ ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
+ ICE_MAX_VSI) {
if (!ice_is_vsi_valid(hw, vsi_handle))
break;