#define ICE_FLOW_FLD_SZ_VLAN 2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
+#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
+#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
+#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IP_DSCP 1
#define ICE_FLOW_FLD_SZ_IP_TTL 1
#define ICE_FLOW_FLD_SZ_IP_PROT 1
/* ICE_FLOW_FIELD_IDX_C_VLAN */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
/* IPv4 / IPv6 */
/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
/* ICE_FLOW_FIELD_IDX_IPV6_DA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
/* Transport */
/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
*/
static const u32 ice_ptypes_mac_ofos[] = {
0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
- 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
+ 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000307,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-/* Packet types for packets with an Outer/First/Single IPv4 header */
+/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
+ * include IPV4 other PTYPEs
+ */
static const u32 ice_ptypes_ipv4_ofos[] = {
- 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
- 0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
+ 0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header, includes
+ * IPV4 other PTYPEs
+ * (superset of ice_ptypes_ipv4_ofos: same bits plus the IPV4 "other" PTYPEs)
+ */
+static const u32 ice_ptypes_ipv4_ofos_all[] = {
+	0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
+	0x00000000, 0x00000155, 0x00000000, 0x00000000,
+	0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-/* Packet types for packets with an Outer/First/Single IPv6 header */
+/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
+ * include IPV6 other PTYPEs
+ */
static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
0x00000000, 0x000002AA, 0x00000000, 0x00000000,
- 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
+ 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header, includes
+ * IPV6 other PTYPEs
+ * (superset of ice_ptypes_ipv6_ofos: same bits plus the IPV6 "other" PTYPEs)
+ */
+static const u32 ice_ptypes_ipv6_ofos_all[] = {
+	0x00000000, 0x00000000, 0x77000000, 0x10002000,
+	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
+	0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
+static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
+ 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
+static const u32 ice_ptypes_ipv4_il_no_l4[] = {
+ 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
+ 0x00000008, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00139800, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
+static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
+ 0x00000000, 0x00000000, 0x43000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x02300000, 0x00000540, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
+static const u32 ice_ptypes_ipv6_il_no_l4[] = {
+ 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
+ 0x00000430, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
0x00000800, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
static const u32 ice_ptypes_gtpc[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000180, 0x00000000,
+ 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for GTPU */
+/* Attributes applied to GTP packets without an Extension Header
+ * (plain GTP-U session traffic); installed as params->attr when a
+ * GTPU segment is processed without the EH/UP/DWN variants.
+ */
+static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
+	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
+	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
+};
+
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
static const u32 ice_ptypes_pppoe[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
+ 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outer/First/Single non-IP MAC header;
+ * this bitmap is ANDed into the profile's PTYPEs when a segment carries
+ * ICE_FLOW_SEG_HDR_ETH_NON_IP.
+ */
+static const u32 ice_ptypes_mac_non_ip_ofos[] = {
+	0x00000846, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for GTPU packets with no IP payload (GTPU non-IP);
+ * this bitmap is ANDed into the profile's PTYPEs when a segment carries
+ * ICE_FLOW_SEG_HDR_GTPU_NON_IP.
+ */
+static const u32 ice_ptypes_gtpu_no_ip[] = {
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000600, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
enum ice_block blk;
ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
- ICE_FLOW_SEG_HDR_NAT_T_ESP)
+ ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP)
#define ICE_FLOW_SEG_HDRS_L2_MASK \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
ICE_FLOW_SEG_HDR_SCTP)
+/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
+#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
/**
* ice_flow_val_hdrs - validates packet segments for valid protocol headers
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
}
-
- if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = i ?
+ (const ice_bitmap_t *)ice_ptypes_ipv4_il :
+ (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = i ?
+ (const ice_bitmap_t *)ice_ptypes_ipv6_il :
+ (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
+ (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
(const ice_bitmap_t *)ice_ptypes_ipv4_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
+ (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
(const ice_bitmap_t *)ice_ptypes_ipv6_il;
ICE_FLOW_PTYPE_MAX);
}
- if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
- src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
- (const ice_bitmap_t *)ice_ptypes_icmp_il;
+ if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
+ src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
+ src = (const ice_bitmap_t *)ice_ptypes_pppoe;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
- } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+ } else {
+ src = (const ice_bitmap_t *)ice_ptypes_pppoe;
+ ice_andnot_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
src = (const ice_bitmap_t *)ice_ptypes_udp_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
+ (const ice_bitmap_t *)ice_ptypes_icmp_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
if (!i) {
src = (const ice_bitmap_t *)ice_ptypes_gre_of;
src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
+ src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
src = (const ice_bitmap_t *)ice_ptypes_gtpu;
ice_and_bitmap(params->ptypes, params->ptypes,
src = (const ice_bitmap_t *)ice_ptypes_gtpu;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet without Extension Header */
+ params->attr = ice_attr_gtpu_session;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
ice_and_bitmap(params->ptypes, params->ptypes,
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
u16 sib_mask = 0;
- s16 adj = 0;
u16 mask;
u16 off;
*/
if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
- else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
+ else
sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
/* If the sibling field is also included, that field's
*/
if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
- else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
+ else
sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
/* If the sibling field is also included, that field's
break;
case ICE_FLOW_FIELD_IDX_IPV6_SA:
case ICE_FLOW_FIELD_IDX_IPV6_DA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
break;
case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
flds[fld].xtrct.prot_id = prot_id;
flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
ICE_FLOW_FV_EXTRACT_SZ;
- flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
+ flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
flds[fld].xtrct.idx = params->es_cnt;
flds[fld].xtrct.mask = ice_flds_info[fld].mask;
* ice_flow_xtract_raws - Create extract sequence entries for raw bytes
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
- * @seg: index of packet segment whose raw fields are to be be extracted
+ * @seg: index of packet segment whose raw fields are to be extracted
*/
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
u64 match = params->prof->segs[i].match;
enum ice_flow_field j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- const u64 bit = BIT_ULL(j);
-
- if (match & bit) {
- status = ice_flow_xtract_fld(hw, params, i, j,
- match);
- if (status)
- return status;
- match &= ~bit;
- }
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ status = ice_flow_xtract_fld(hw, params, i, j, match);
+ if (status)
+ return status;
+ ice_clear_bit(j, (ice_bitmap_t *)&match);
}
/* Process raw matching bytes */
for (i = 0; i < params->prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
- u64 match = seg->match;
u8 j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- struct ice_flow_fld_info *fld;
- const u64 bit = BIT_ULL(j);
-
- if (!(match & bit))
- continue;
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ struct ice_flow_fld_info *fld = &seg->fields[j];
- fld = &seg->fields[j];
fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
fld->entry.val = index;
index += fld->entry.last;
}
-
- match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
if (status)
return status;
break;
- case ICE_BLK_SW:
default:
return ICE_ERR_NOT_IMPL;
}
struct ice_flow_prof *p, *prof = NULL;
ice_acquire_lock(&hw->fl_profs_locks[blk]);
- LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
segs_cnt && segs_cnt == p->segs_cnt) {
u8 i;
break;
}
}
- }
ice_release_lock(&hw->fl_profs_locks[blk]);
return prof;
{
struct ice_flow_prof *p;
- LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
if (p->id == prof_id)
return p;
- }
return NULL;
}
ice_free(hw, entry);
}
+/**
+ * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: the profile ID handle
+ * @hw_prof_id: pointer to variable to receive the HW profile ID
+ *
+ * Looks up @prof_id in block @blk's profile map while holding prof_map_lock.
+ * On a hit, writes the hardware profile ID to @hw_prof_id and returns
+ * ICE_SUCCESS; otherwise returns ICE_ERR_DOES_NOT_EXIST.
+ */
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+		     u8 *hw_prof_id)
+{
+	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
+	struct ice_prof_map *map;
+
+	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+	map = ice_search_prof_id(hw, blk, prof_id);
+	if (map) {
+		*hw_prof_id = map->prof_id;
+		status = ICE_SUCCESS;
+	}
+	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+	return status;
+}
+
#define ICE_ACL_INVALID_SCEN 0x3f
/**
- * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
+ * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
* @hw: pointer to the hardware structure
* @prof: pointer to flow profile
- * @buf: destination buffer function writes partial xtrct sequence to
+ * @buf: destination buffer function writes partial extraction sequence to
*
- * returns ICE_SUCCESS if no pf is associated to the given profile
- * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
+ * returns ICE_SUCCESS if no PF is associated to the given profile
+ * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
* returns other error code for real error
*/
static enum ice_status
if (status)
return status;
- /* If all pf's associated scenarios are all 0 or all
+ /* If all PF's associated scenarios are all 0 or all
* ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
* not been configured yet.
*/
buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
return ICE_SUCCESS;
- else
- return ICE_ERR_IN_USE;
+
+ return ICE_ERR_IN_USE;
}
/**
- * ice_flow_acl_free_act_cntr - Free the acl rule's actions
+ * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
* @hw: pointer to the hardware structure
* @acts: array of actions to be performed on a match
* @acts_cnt: number of actions
}
/**
- * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
+ * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
* @hw: pointer to the hardware structure
* @prof: pointer to flow profile
*
- * Disassociate the scenario to the Profile for the PF of the VSI.
+ * Disassociate the scenario from the profile for the PF of the VSI.
*/
static enum ice_status
ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
if (status)
return status;
- /* Clear scenario for this pf */
+ /* Clear scenario for this PF */
buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
- status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
+ status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
return status;
}
struct ice_flow_action *acts, u8 acts_cnt,
struct ice_flow_prof **prof)
{
- struct ice_flow_prof_params params;
+ struct ice_flow_prof_params *params;
enum ice_status status;
u8 i;
if (!prof || (acts_cnt && !acts))
return ICE_ERR_BAD_PTR;
- ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
- params.prof = (struct ice_flow_prof *)
- ice_malloc(hw, sizeof(*params.prof));
- if (!params.prof)
+ params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
+ if (!params)
return ICE_ERR_NO_MEMORY;
+ params->prof = (struct ice_flow_prof *)
+ ice_malloc(hw, sizeof(*params->prof));
+ if (!params->prof) {
+ status = ICE_ERR_NO_MEMORY;
+ goto free_params;
+ }
+
/* initialize extraction sequence to all invalid (0xff) */
for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
- params.es[i].prot_id = ICE_PROT_INVALID;
- params.es[i].off = ICE_FV_OFFSET_INVAL;
+ params->es[i].prot_id = ICE_PROT_INVALID;
+ params->es[i].off = ICE_FV_OFFSET_INVAL;
}
- params.blk = blk;
- params.prof->id = prof_id;
- params.prof->dir = dir;
- params.prof->segs_cnt = segs_cnt;
+ params->blk = blk;
+ params->prof->id = prof_id;
+ params->prof->dir = dir;
+ params->prof->segs_cnt = segs_cnt;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
*/
for (i = 0; i < segs_cnt; i++)
- ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs),
+ ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
ICE_NONDMA_TO_NONDMA);
/* Make a copy of the actions that need to be persistent in the flow
* profile instance.
*/
if (acts_cnt) {
- params.prof->acts = (struct ice_flow_action *)
+ params->prof->acts = (struct ice_flow_action *)
ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
ICE_NONDMA_TO_NONDMA);
- if (!params.prof->acts) {
+ if (!params->prof->acts) {
status = ICE_ERR_NO_MEMORY;
goto out;
}
}
- status = ice_flow_proc_segs(hw, ¶ms);
+ status = ice_flow_proc_segs(hw, params);
if (status) {
- ice_debug(hw, ICE_DBG_FLOW,
- "Error processing a flow's packet segments\n");
+ ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
goto out;
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
- params.attr, params.attr_cnt, params.es,
- params.mask);
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ params->attr, params->attr_cnt, params->es,
+ params->mask);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
}
- INIT_LIST_HEAD(¶ms.prof->entries);
- ice_init_lock(¶ms.prof->entries_lock);
- *prof = params.prof;
+ INIT_LIST_HEAD(¶ms->prof->entries);
+ ice_init_lock(¶ms->prof->entries_lock);
+ *prof = params->prof;
out:
if (status) {
- if (params.prof->acts)
- ice_free(hw, params.prof->acts);
- ice_free(hw, params.prof);
+ if (params->prof->acts)
+ ice_free(hw, params->prof->acts);
+ ice_free(hw, params->prof);
}
+free_params:
+ ice_free(hw, params);
return status;
}
struct ice_aqc_acl_prof_generic_frmt buf;
u8 prof_id = 0;
- /* Deassociate the scenario to the Profile for the PF */
+ /* Disassociate the scenario from the profile for the PF */
status = ice_flow_acl_disassoc_scen(hw, prof);
if (status)
return status;
for (i = 0; i < prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &prof->segs[i];
- u64 match = seg->match;
u16 j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- const u64 bit = BIT_ULL(j);
-
- if (!(match & bit))
- continue;
-
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
+ ICE_FLOW_FIELD_IDX_MAX) {
info = &seg->fields[j];
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
buf.word_selection[info->entry.val] =
- info->xtrct.idx;
+ info->xtrct.idx;
else
ice_flow_acl_set_xtrct_seq_fld(&buf,
info);
-
- match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
/* Update the current PF */
buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
- status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
+ status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
return status;
}
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
if (!status)
ice_set_bit(vsi_handle, prof->vsis);
else
- ice_debug(hw, ICE_DBG_FLOW,
- "HW profile add failed, %d\n",
+ ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
status);
}
if (!status)
ice_clear_bit(vsi_handle, prof->vsis);
else
- ice_debug(hw, ICE_DBG_FLOW,
- "HW profile remove failed, %d\n",
+ ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
status);
}
return status;
}
-/**
- * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @prof_id: the profile ID handle
- * @hw_prof_id: pointer to variable to receive the HW profile ID
- */
-enum ice_status
-ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
- u8 *hw_prof_id)
-{
- struct ice_prof_map *map;
-
- map = ice_search_prof_id(hw, blk, prof_id);
- if (map) {
- *hw_prof_id = map->prof_id;
- return ICE_SUCCESS;
- }
-
- return ICE_ERR_DOES_NOT_EXIST;
-}
-
/**
* ice_flow_find_entry - look for a flow entry using its unique ID
* @hw: pointer to the HW struct
}
/**
- * ice_flow_acl_check_actions - Checks the acl rule's actions
+ * ice_flow_acl_check_actions - Checks the ACL rule's actions
* @hw: pointer to the hardware structure
* @acts: array of actions to be performed on a match
* @acts_cnt: number of actions
- * @cnt_alloc: indicates if a ACL counter has been allocated.
+ * @cnt_alloc: indicates if an ACL counter has been allocated.
*/
static enum ice_status
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
}
/**
- * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
+ * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
* @fld: number of the given field
* @info: info about field
* @range_buf: range checker configuration buffer
}
/**
- * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
+ * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
* @fld: number of the given field
* @info: info about the field
* @buf: buffer containing the entry
}
/**
- * ice_flow_acl_frmt_entry - Format acl entry
+ * ice_flow_acl_frmt_entry - Format ACL entry
* @hw: pointer to the hardware structure
* @prof: pointer to flow profile
* @e: pointer to the flow entry
*
* Formats the key (and key_inverse) to be matched from the data passed in,
* along with data from the flow profile. This key/key_inverse pair makes up
- * the 'entry' for an acl flow entry.
+ * the 'entry' for an ACL flow entry.
*/
static enum ice_status
ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
e->acts = (struct ice_flow_action *)
ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
ICE_NONDMA_TO_NONDMA);
-
if (!e->acts)
goto out;
for (i = 0; i < prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &prof->segs[i];
- u64 match = seg->match;
- u16 j;
-
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- struct ice_flow_fld_info *info;
- const u64 bit = BIT_ULL(j);
-
- if (!(match & bit))
- continue;
+ u8 j;
- info = &seg->fields[j];
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ struct ice_flow_fld_info *info = &seg->fields[j];
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
ice_flow_acl_frmt_entry_range(j, info,
else
ice_flow_acl_frmt_entry_fld(j, info, buf,
dontcare, data);
-
- match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
}
/**
- * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
+ * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
* @p: flow priority
*/
-static enum ice_acl_entry_prior
-ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
+static enum ice_acl_entry_prio
+ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
{
- enum ice_acl_entry_prior acl_prior;
+ enum ice_acl_entry_prio acl_prio;
switch (p) {
case ICE_FLOW_PRIO_LOW:
- acl_prior = ICE_LOW;
+ acl_prio = ICE_ACL_PRIO_LOW;
break;
case ICE_FLOW_PRIO_NORMAL:
- acl_prior = ICE_NORMAL;
+ acl_prio = ICE_ACL_PRIO_NORMAL;
break;
case ICE_FLOW_PRIO_HIGH:
- acl_prior = ICE_HIGH;
+ acl_prio = ICE_ACL_PRIO_HIGH;
break;
default:
- acl_prior = ICE_NORMAL;
+ acl_prio = ICE_ACL_PRIO_NORMAL;
break;
}
- return acl_prior;
+ return acl_prio;
}
/**
if (!entry || !(*entry) || !prof)
return ICE_ERR_BAD_PTR;
- e = *(entry);
+ e = *entry;
do_chg_rng_chk = false;
if (e->range_buf) {
*/
exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
&do_add_entry, &do_rem_entry);
-
if (do_rem_entry) {
status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
if (status)
}
/* Prepare the result action buffer */
- acts = (struct ice_acl_act_entry *)ice_calloc
- (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
+ acts = (struct ice_acl_act_entry *)
+ ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
+ if (!acts)
+ return ICE_ERR_NO_MEMORY;
+
for (i = 0; i < e->acts_cnt; i++)
ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
sizeof(struct ice_acl_act_entry),
ICE_NONDMA_TO_NONDMA);
if (do_add_entry) {
- enum ice_acl_entry_prior prior;
+ enum ice_acl_entry_prio prio;
u8 *keys, *inverts;
u16 entry_idx;
keys = (u8 *)e->entry;
inverts = keys + (e->entry_sz / 2);
- prior = ice_flow_acl_convert_to_acl_prior(e->priority);
+ prio = ice_flow_acl_convert_to_acl_prio(e->priority);
- status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
+ status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
inverts, acts, e->acts_cnt,
&entry_idx);
if (status)
exist->acts = (struct ice_flow_action *)
ice_calloc(hw, exist->acts_cnt,
sizeof(struct ice_flow_action));
-
if (!exist->acts) {
status = ICE_ERR_NO_MEMORY;
goto out;
*(entry) = exist;
}
out:
- if (acts)
- ice_free(hw, acts);
+ ice_free(hw, acts);
return status;
}
goto out;
break;
- case ICE_BLK_SW:
- case ICE_BLK_PE:
default:
status = ICE_ERR_NOT_IMPL;
goto out;
*
* This helper function stores information of a field being matched, including
* the type of the field and the locations of the value to match, the mask, and
- * and the upper-bound value in the start of the input buffer for a flow entry.
+ * the upper-bound value in the start of the input buffer for a flow entry.
* This function should only be used for fixed-size data structures.
*
* This function also opportunistically determines the protocol headers to be
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
- (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
- ICE_FLOW_SEG_HDR_SCTP)
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
{
- u64 val = hash_fields;
+ u64 val;
u8 i;
- for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
- u64 bit = BIT_ULL(i);
+ ice_for_each_set_bit(i, (ice_bitmap_t *)&hash_fields,
+ ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
- if (val & bit) {
- ice_flow_set_fld(segs, (enum ice_flow_field)i,
- ICE_FLOW_FLD_OFF_INVAL,
- ICE_FLOW_FLD_OFF_INVAL,
- ICE_FLOW_FLD_OFF_INVAL, false);
- val &= ~bit;
- }
- }
ICE_FLOW_SET_HDRS(segs, flow_hdr);
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
- ~ICE_FLOW_RSS_HDRS_INNER_MASK)
+ ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return ICE_ERR_PARAM;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
ice_acquire_lock(&hw->rss_locks);
LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
- ice_rss_cfg, l_entry) {
+ ice_rss_cfg, l_entry)
if (ice_test_and_clear_bit(vsi_handle, r->vsis))
if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
LIST_DEL(&r->l_entry);
ice_free(hw, r);
}
- }
ice_release_lock(&hw->rss_locks);
}
if (LIST_EMPTY(&hw->fl_profs[blk]))
return ICE_SUCCESS;
- ice_acquire_lock(&hw->fl_profs_locks[blk]);
+ ice_acquire_lock(&hw->rss_locks);
LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
- l_entry) {
+ l_entry)
if (ice_is_bit_set(p->vsis, vsi_handle)) {
status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
if (status)
break;
if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
- status = ice_flow_rem_prof_sync(hw, blk, p);
+ status = ice_flow_rem_prof(hw, blk, p->id);
if (status)
break;
}
}
- }
- ice_release_lock(&hw->fl_profs_locks[blk]);
+ ice_release_lock(&hw->rss_locks);
return status;
}
 * remove it from the RSS entry list of the VSI context and delete the entry.
*/
LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
- ice_rss_cfg, l_entry) {
+ ice_rss_cfg, l_entry)
if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
ice_clear_bit(vsi_handle, r->vsis);
}
return;
}
- }
}
/**
struct ice_prof_map *map;
u8 prof_id, m;
+ ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
- prof_id = map->prof_id;
-
+ if (map)
+ prof_id = map->prof_id;
+ ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ if (!map)
+ return;
/* clear to default */
for (m = 0; m < 6; m++)
wr32(hw, GLQF_HSYMM(prof_id, m), 0);
if (status)
goto exit;
+ /* Don't do RSS for GTPU Outer */
+ if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
+ segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
+ status = ICE_SUCCESS;
+ goto exit;
+ }
+
/* Search for a flow profile that has matching headers, hash fields
* and has the input VSI associated to it. If found, no further
* operations required and exit.
}
/* Check if a flow profile exists with the same protocol headers and
- * associated with the input VSI. If so disasscociate the VSI from
+ * associated with the input VSI. If so disassociate the VSI from
* this profile. The VSI will be added to a new profile created with
* the protocol header and new hash field configuration.
*/
if (status)
goto out;
+ /* Don't do RSS for GTPU Outer */
+ if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
+ segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
+ status = ICE_SUCCESS;
+ goto out;
+ }
+
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
*/
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
- struct ice_rss_cfg *r, *rss_cfg = NULL;
+ u64 rss_hash = ICE_HASH_INVALID;
+ struct ice_rss_cfg *r;
/* verify that the protocol header is non-zero and the VSI is valid */
if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
ice_rss_cfg, l_entry)
if (ice_is_bit_set(r->vsis, vsi_handle) &&
r->packet_hdr == hdrs) {
- rss_cfg = r;
+ rss_hash = r->hashed_flds;
break;
}
ice_release_lock(&hw->rss_locks);
- return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+ return rss_hash;
}