*/
static const u32 ice_ptypes_mac_ofos[] = {
0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
- 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
- 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
+ 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000307,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
* include IPV4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos[] = {
- 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
- 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
* IPV4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos_all[] = {
- 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
- 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
+ 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
0x00000000, 0x000002AA, 0x00000000, 0x00000000,
- 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
static const u32 ice_ptypes_ipv6_ofos_all[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
0x00000000, 0x000002AA, 0x00000000, 0x00000000,
- 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
+ 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
-static const u32 ice_ipv4_ofos_no_l4[] = {
+static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
0x10C00000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x000cc000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
-static const u32 ice_ipv4_il_no_l4[] = {
+static const u32 ice_ptypes_ipv4_il_no_l4[] = {
0x60000000, 0x18043008, 0x80000002, 0x6010c021,
0x00000008, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00139800, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
-static const u32 ice_ipv6_ofos_no_l4[] = {
+static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
0x00000000, 0x00000000, 0x43000000, 0x10002000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x02300000, 0x00000000, 0x00000000,
+ 0x00000000, 0x02300000, 0x00000540, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
-static const u32 ice_ipv6_il_no_l4[] = {
+static const u32 ice_ptypes_ipv6_il_no_l4[] = {
0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
0x00000430, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x4e600000, 0x00000000,
/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
static const u32 ice_ptypes_gtpc[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000180, 0x00000000,
+ 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for GTPU packets without an inner IP payload.
+ * NOTE(review): only bits in dword 10 (0x00000600 -> two PTYPEs) are set;
+ * presumably these are the GTP-U non-IP payload PTYPEs — confirm against
+ * the device PTYPE table.
+ */
+static const u32 ice_ptypes_gtpu_no_ip[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000600, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
enum ice_block blk;
ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
- ICE_FLOW_SEG_HDR_NAT_T_ESP)
+ ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP)
#define ICE_FLOW_SEG_HDRS_L2_MASK \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
!(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
- src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
- (const ice_bitmap_t *)ice_ipv4_il_no_l4;
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
+ (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
!(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
- src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
- (const ice_bitmap_t *)ice_ipv6_il_no_l4;
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
+ (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
+ src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
src = (const ice_bitmap_t *)ice_ptypes_gtpu;
ice_and_bitmap(params->ptypes, params->ptypes,
*/
if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
- else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
+ else
sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
/* If the sibling field is also included, that field's
*/
if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
- else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
+ else
sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
/* If the sibling field is also included, that field's
* ice_flow_xtract_raws - Create extract sequence entries for raw bytes
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
- * @seg: index of packet segment whose raw fields are to be be extracted
+ * @seg: index of packet segment whose raw fields are to be extracted
*/
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
u64 match = params->prof->segs[i].match;
enum ice_flow_field j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- const u64 bit = BIT_ULL(j);
-
- if (match & bit) {
- status = ice_flow_xtract_fld(hw, params, i, j,
- match);
- if (status)
- return status;
- match &= ~bit;
- }
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ status = ice_flow_xtract_fld(hw, params, i, j, match);
+ if (status)
+ return status;
+ ice_clear_bit(j, (ice_bitmap_t *)&match);
}
/* Process raw matching bytes */
for (i = 0; i < params->prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
- u64 match = seg->match;
u8 j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- struct ice_flow_fld_info *fld;
- const u64 bit = BIT_ULL(j);
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ struct ice_flow_fld_info *fld = &seg->fields[j];
- if (!(match & bit))
- continue;
-
- fld = &seg->fields[j];
fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
fld->entry.val = index;
index += fld->entry.last;
}
-
- match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
ice_free(hw, entry);
}
+/**
+ * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: the profile ID handle
+ * @hw_prof_id: pointer to variable to receive the HW profile ID
+ *
+ * Searches the block's profile map for @prof_id while holding the ES
+ * profile-map lock; on a hit the mapped HW profile ID is written to
+ * @hw_prof_id.
+ *
+ * Return: ICE_SUCCESS if the handle was found, ICE_ERR_DOES_NOT_EXIST
+ * otherwise (in which case *@hw_prof_id is left untouched).
+ */
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u8 *hw_prof_id)
+{
+ enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
+ struct ice_prof_map *map;
+
+ /* prof_map_lock serializes lookups against concurrent map updates */
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+ map = ice_search_prof_id(hw, blk, prof_id);
+ if (map) {
+ *hw_prof_id = map->prof_id;
+ status = ICE_SUCCESS;
+ }
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
+
#define ICE_ACL_INVALID_SCEN 0x3f
/**
buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
return ICE_SUCCESS;
- else
- return ICE_ERR_IN_USE;
+
+ return ICE_ERR_IN_USE;
}
/**
/* Clear scenario for this PF */
buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
- status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
+ status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
return status;
}
struct ice_flow_action *acts, u8 acts_cnt,
struct ice_flow_prof **prof)
{
- struct ice_flow_prof_params params;
+ struct ice_flow_prof_params *params;
enum ice_status status;
u8 i;
if (!prof || (acts_cnt && !acts))
return ICE_ERR_BAD_PTR;
- ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
- params.prof = (struct ice_flow_prof *)
- ice_malloc(hw, sizeof(*params.prof));
- if (!params.prof)
+ params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
+ if (!params)
return ICE_ERR_NO_MEMORY;
+ params->prof = (struct ice_flow_prof *)
+ ice_malloc(hw, sizeof(*params->prof));
+ if (!params->prof) {
+ status = ICE_ERR_NO_MEMORY;
+ goto free_params;
+ }
+
/* initialize extraction sequence to all invalid (0xff) */
for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
- params.es[i].prot_id = ICE_PROT_INVALID;
- params.es[i].off = ICE_FV_OFFSET_INVAL;
+ params->es[i].prot_id = ICE_PROT_INVALID;
+ params->es[i].off = ICE_FV_OFFSET_INVAL;
}
- params.blk = blk;
- params.prof->id = prof_id;
- params.prof->dir = dir;
- params.prof->segs_cnt = segs_cnt;
+ params->blk = blk;
+ params->prof->id = prof_id;
+ params->prof->dir = dir;
+ params->prof->segs_cnt = segs_cnt;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
*/
for (i = 0; i < segs_cnt; i++)
- ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs),
+ ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
ICE_NONDMA_TO_NONDMA);
/* Make a copy of the actions that need to be persistent in the flow
* profile instance.
*/
if (acts_cnt) {
- params.prof->acts = (struct ice_flow_action *)
+ params->prof->acts = (struct ice_flow_action *)
ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
ICE_NONDMA_TO_NONDMA);
- if (!params.prof->acts) {
+ if (!params->prof->acts) {
status = ICE_ERR_NO_MEMORY;
goto out;
}
}
- status = ice_flow_proc_segs(hw, ¶ms);
+ status = ice_flow_proc_segs(hw, params);
if (status) {
- ice_debug(hw, ICE_DBG_FLOW,
- "Error processing a flow's packet segments\n");
+ ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
goto out;
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
- params.attr, params.attr_cnt, params.es,
- params.mask);
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ params->attr, params->attr_cnt, params->es,
+ params->mask);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
}
- INIT_LIST_HEAD(¶ms.prof->entries);
- ice_init_lock(¶ms.prof->entries_lock);
- *prof = params.prof;
+ INIT_LIST_HEAD(¶ms->prof->entries);
+ ice_init_lock(¶ms->prof->entries_lock);
+ *prof = params->prof;
out:
if (status) {
- if (params.prof->acts)
- ice_free(hw, params.prof->acts);
- ice_free(hw, params.prof);
+ if (params->prof->acts)
+ ice_free(hw, params->prof->acts);
+ ice_free(hw, params->prof);
}
+free_params:
+ ice_free(hw, params);
return status;
}
for (i = 0; i < prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &prof->segs[i];
- u64 match = seg->match;
u16 j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- const u64 bit = BIT_ULL(j);
-
- if (!(match & bit))
- continue;
-
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
+ ICE_FLOW_FIELD_IDX_MAX) {
info = &seg->fields[j];
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
buf.word_selection[info->entry.val] =
- info->xtrct.idx;
+ info->xtrct.idx;
else
ice_flow_acl_set_xtrct_seq_fld(&buf,
info);
-
- match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
/* Update the current PF */
buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
- status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
+ status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
return status;
}
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
if (!status)
ice_set_bit(vsi_handle, prof->vsis);
else
- ice_debug(hw, ICE_DBG_FLOW,
- "HW profile add failed, %d\n",
+ ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
status);
}
if (!status)
ice_clear_bit(vsi_handle, prof->vsis);
else
- ice_debug(hw, ICE_DBG_FLOW,
- "HW profile remove failed, %d\n",
+ ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
status);
}
return status;
}
-/**
- * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @prof_id: the profile ID handle
- * @hw_prof_id: pointer to variable to receive the HW profile ID
- */
-enum ice_status
-ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
- u8 *hw_prof_id)
-{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
- struct ice_prof_map *map;
-
- ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
- map = ice_search_prof_id(hw, blk, prof_id);
- if (map) {
- *hw_prof_id = map->prof_id;
- status = ICE_SUCCESS;
- }
- ice_release_lock(&hw->blk[blk].es.prof_map_lock);
- return status;
-}
-
/**
* ice_flow_find_entry - look for a flow entry using its unique ID
* @hw: pointer to the HW struct
e->acts = (struct ice_flow_action *)
ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
ICE_NONDMA_TO_NONDMA);
-
if (!e->acts)
goto out;
for (i = 0; i < prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &prof->segs[i];
- u64 match = seg->match;
- u16 j;
-
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- struct ice_flow_fld_info *info;
- const u64 bit = BIT_ULL(j);
-
- if (!(match & bit))
- continue;
+ u8 j;
- info = &seg->fields[j];
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ struct ice_flow_fld_info *info = &seg->fields[j];
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
ice_flow_acl_frmt_entry_range(j, info,
else
ice_flow_acl_frmt_entry_fld(j, info, buf,
dontcare, data);
-
- match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
}
/**
- * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
+ * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
* @p: flow priority
*/
-static enum ice_acl_entry_prior
-ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
+static enum ice_acl_entry_prio
+ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
{
- enum ice_acl_entry_prior acl_prior;
+ enum ice_acl_entry_prio acl_prio;
switch (p) {
case ICE_FLOW_PRIO_LOW:
- acl_prior = ICE_LOW;
+ acl_prio = ICE_ACL_PRIO_LOW;
break;
case ICE_FLOW_PRIO_NORMAL:
- acl_prior = ICE_NORMAL;
+ acl_prio = ICE_ACL_PRIO_NORMAL;
break;
case ICE_FLOW_PRIO_HIGH:
- acl_prior = ICE_HIGH;
+ acl_prio = ICE_ACL_PRIO_HIGH;
break;
default:
- acl_prior = ICE_NORMAL;
+ acl_prio = ICE_ACL_PRIO_NORMAL;
break;
}
- return acl_prior;
+ return acl_prio;
}
/**
if (!entry || !(*entry) || !prof)
return ICE_ERR_BAD_PTR;
- e = *(entry);
+ e = *entry;
do_chg_rng_chk = false;
if (e->range_buf) {
*/
exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
&do_add_entry, &do_rem_entry);
-
if (do_rem_entry) {
status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
if (status)
}
/* Prepare the result action buffer */
- acts = (struct ice_acl_act_entry *)ice_calloc
- (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
+ acts = (struct ice_acl_act_entry *)
+ ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
+ if (!acts)
+ return ICE_ERR_NO_MEMORY;
+
for (i = 0; i < e->acts_cnt; i++)
ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
sizeof(struct ice_acl_act_entry),
ICE_NONDMA_TO_NONDMA);
if (do_add_entry) {
- enum ice_acl_entry_prior prior;
+ enum ice_acl_entry_prio prio;
u8 *keys, *inverts;
u16 entry_idx;
keys = (u8 *)e->entry;
inverts = keys + (e->entry_sz / 2);
- prior = ice_flow_acl_convert_to_acl_prior(e->priority);
+ prio = ice_flow_acl_convert_to_acl_prio(e->priority);
- status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
+ status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
inverts, acts, e->acts_cnt,
&entry_idx);
if (status)
exist->acts = (struct ice_flow_action *)
ice_calloc(hw, exist->acts_cnt,
sizeof(struct ice_flow_action));
-
if (!exist->acts) {
status = ICE_ERR_NO_MEMORY;
goto out;
*(entry) = exist;
}
out:
- if (acts)
- ice_free(hw, acts);
+ ice_free(hw, acts);
return status;
}
*
* This helper function stores information of a field being matched, including
* the type of the field and the locations of the value to match, the mask, and
- * and the upper-bound value in the start of the input buffer for a flow entry.
+ * the upper-bound value in the start of the input buffer for a flow entry.
* This function should only be used for fixed-size data structures.
*
* This function also opportunistically determines the protocol headers to be
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
- (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
- ICE_FLOW_SEG_HDR_SCTP)
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
{
- u64 val = hash_fields;
+ u64 val;
u8 i;
- for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
- u64 bit = BIT_ULL(i);
+ ice_for_each_set_bit(i, (ice_bitmap_t *)&hash_fields,
+ ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
- if (val & bit) {
- ice_flow_set_fld(segs, (enum ice_flow_field)i,
- ICE_FLOW_FLD_OFF_INVAL,
- ICE_FLOW_FLD_OFF_INVAL,
- ICE_FLOW_FLD_OFF_INVAL, false);
- val &= ~bit;
- }
- }
ICE_FLOW_SET_HDRS(segs, flow_hdr);
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
if (status)
goto exit;
- /* don't do RSS for GTPU outer */
+ /* Don't do RSS for GTPU Outer */
if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
- segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU)
- return ICE_SUCCESS;
+ segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
+ status = ICE_SUCCESS;
+ goto exit;
+ }
/* Search for a flow profile that has matching headers, hash fields
* and has the input VSI associated to it. If found, no further
ice_acquire_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS, symm);
-
if (!status)
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
addl_hdrs, ICE_RSS_INNER_HEADERS,
if (status)
goto out;
+ /* Don't do RSS for GTPU Outer */
if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
- segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU)
- return ICE_SUCCESS;
+ segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
+ status = ICE_SUCCESS;
+ goto out;
+ }
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle,