/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */
#include "ice_common.h"
/* Sizes (in bytes) of the protocol header fields that the flow module can
 * extract and match on.
 */
#define ICE_FLOW_FLD_SZ_VLAN 2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IP_DSCP 1
#define ICE_FLOW_FLD_SZ_IP_TTL 1
#define ICE_FLOW_FLD_SZ_IP_PROT 1
#define ICE_FLOW_FLD_SZ_ARP_OPER 2
#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
#define ICE_FLOW_FLD_SZ_GTP_TEID 4
#define ICE_FLOW_FLD_SZ_GTP_QFI 2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
#define ICE_FLOW_FLD_SZ_ESP_SPI 4
#define ICE_FLOW_FLD_SZ_AH_SPI 4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
/* ICE_FLOW_FIELD_IDX_C_VLAN */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
- /* IPv4 */
- /* ICE_FLOW_FIELD_IDX_IP_DSCP */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 1, 1),
- /* ICE_FLOW_FIELD_IDX_IP_TTL */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 8, 1),
- /* ICE_FLOW_FIELD_IDX_IP_PROT */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 9, ICE_FLOW_FLD_SZ_IP_PROT),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
+ /* IPv4 / IPv6 */
+ /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
+ 0x00fc),
+ /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
+ 0x0ff0),
+ /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
+ ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
+ /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
+ ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
+ /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
+ ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
+ ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
/* ICE_FLOW_FIELD_IDX_IPV4_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
/* ICE_FLOW_FIELD_IDX_IPV4_DA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
- /* IPv6 */
/* ICE_FLOW_FIELD_IDX_IPV6_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
/* ICE_FLOW_FIELD_IDX_IPV6_DA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
/* Transport */
/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
ICE_FLOW_FLD_SZ_GTP_TEID),
+ /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
+ ICE_FLOW_FLD_SZ_GTP_TEID),
+ /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
+ ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
ICE_FLOW_FLD_SZ_GTP_TEID),
/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
+ /* PFCP */
+ /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
+ ICE_FLOW_FLD_SZ_PFCP_SEID),
+ /* L2TPV3 */
+ /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
+ ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
+ /* ESP */
+ /* ICE_FLOW_FIELD_IDX_ESP_SPI */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
+ ICE_FLOW_FLD_SZ_ESP_SPI),
+ /* AH */
+ /* ICE_FLOW_FIELD_IDX_AH_SPI */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
+ ICE_FLOW_FLD_SZ_AH_SPI),
+ /* NAT_T_ESP */
+ /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
+ ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
* Packet types for packets with an Outer/First/Single MAC header
*/
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-/* Packet types for packets with an Outer/First/Single IPv4 header */
+/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
+ * include IPV4 other PTYPEs
+ */
static const u32 ice_ptypes_ipv4_ofos[] = {
- 0xFDC00000, 0xBFBF7F7E, 0x00EFDFDF, 0x00000000,
+ 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000155, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x0003000F, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header, includes
+ * IPV4 other PTYPEs
+ */
+static const u32 ice_ptypes_ipv4_ofos_all[] = {
+ 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000155, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-/* Packet types for packets with an Outer/First/Single IPv6 header */
+/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
+ * include IVP6 other PTYPEs
+ */
static const u32 ice_ptypes_ipv6_ofos[] = {
- 0x00000000, 0x00000000, 0xF7000000, 0xFEFDFDFB,
- 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
- 0x00080F00, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header, includes
+ * IPV6 other PTYPEs
+ */
+static const u32 ice_ptypes_ipv6_ofos_all[] = {
+ 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
+static const u32 ice_ipv4_ofos_no_l4[] = {
+ 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000cc000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
+static const u32 ice_ipv4_il_no_l4[] = {
+ 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
+ 0x00000008, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00139800, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
+static const u32 ice_ipv6_ofos_no_l4[] = {
+ 0x00000000, 0x00000000, 0x43000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x02300000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
+static const u32 ice_ipv6_il_no_l4[] = {
+ 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
+ 0x00000430, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
* packets with inner UDP.
*/
static const u32 ice_ptypes_udp_il[] = {
- 0x81000000, 0x20204040, 0x04081010, 0x80810102,
- 0x00204040, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x81000000, 0x20204040, 0x04000010, 0x80810102,
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00410000, 0x90842000, 0x00000007,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
- 0x04000000, 0x80810102, 0x10204040, 0x42040408,
- 0x00810102, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x04000000, 0x80810102, 0x10000040, 0x02040408,
+ 0x00000102, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00820000, 0x21084000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
- 0x08000000, 0x01020204, 0x20408081, 0x04080810,
- 0x01020204, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08000000, 0x01020204, 0x20000081, 0x04080810,
+ 0x00000204, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x01040000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
- 0x00000000, 0x02040408, 0x40810102, 0x08101020,
- 0x02040408, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x02040408, 0x40000102, 0x08101020,
+ 0x00000408, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x42108000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
- 0x00000000, 0xBFBF7800, 0x00EFDFDF, 0xFEFDE000,
- 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
+ 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for GTPU */
+static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
+};
+
+static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+};
+
+static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
+};
+
/* Packet types for GTP-U tunneled packets */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+
+/* Packet types for pppoe */
+static const u32 ice_ptypes_pppoe[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x03fff000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with PFCP NODE header */
+static const u32 ice_ptypes_pfcp_node[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x80000000, 0x00000002,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with PFCP SESSION header */
+static const u32 ice_ptypes_pfcp_session[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000005,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for l2tpv3 */
+static const u32 ice_ptypes_l2tpv3[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000300,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for esp */
+static const u32 ice_ptypes_esp[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000003, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for ah */
+static const u32 ice_ptypes_ah[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with NAT_T ESP header */
+static const u32 ice_ptypes_nat_t_esp[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000030, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 ice_ptypes_mac_non_ip_ofos[] = {
+ 0x00000846, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
* This will give us the direction flags.
*/
struct ice_fv_word es[ICE_MAX_FV_WORDS];
+ /* attributes can be used to add attributes to a particular PTYPE */
+ const struct ice_ptype_attributes *attr;
+ u16 attr_cnt;
+
u16 mask[ICE_MAX_FV_WORDS];
ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};
/* Segment-header bits that describe tunneled/inner protocols for RSS */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP)

#define ICE_FLOW_SEG_HDRS_L2_MASK \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
/**
* ice_flow_val_hdrs - validates packet segments for valid protocol headers
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
- const u32 masks = (ICE_FLOW_SEG_HDRS_L2_MASK |
- ICE_FLOW_SEG_HDRS_L3_MASK |
- ICE_FLOW_SEG_HDRS_L4_MASK);
u8 i;
for (i = 0; i < segs_cnt; i++) {
- /* No header specified */
- if (!(segs[i].hdrs & masks) || (segs[i].hdrs & ~masks))
- return ICE_ERR_PARAM;
-
/* Multiple L3 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
!ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
ICE_FLOW_PTYPE_MAX);
}
- if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
+ src = (const ice_bitmap_t *)ice_ptypes_pppoe;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+ if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = !i ?
+ (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all :
+ (const ice_bitmap_t *)ice_ptypes_ipv4_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = !i ?
+ (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all :
+ (const ice_bitmap_t *)ice_ptypes_ipv6_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
+ src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
+ (const ice_bitmap_t *)ice_ipv4_il_no_l4;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
(const ice_bitmap_t *)ice_ptypes_ipv4_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
+ src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
+ (const ice_bitmap_t *)ice_ipv6_il_no_l4;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
(const ice_bitmap_t *)ice_ptypes_ipv6_il;
ICE_FLOW_PTYPE_MAX);
}
- if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
- src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
- (const ice_bitmap_t *)ice_ptypes_icmp_il;
+ if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
+ src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
+ src = (const ice_bitmap_t *)ice_ptypes_pppoe;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
- } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+ } else {
+ src = (const ice_bitmap_t *)ice_ptypes_pppoe;
+ ice_andnot_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
src = (const ice_bitmap_t *)ice_ptypes_udp_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
+ (const ice_bitmap_t *)ice_ptypes_icmp_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
if (!i) {
src = (const ice_bitmap_t *)ice_ptypes_gre_of;
src, ICE_FLOW_PTYPE_MAX);
}
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
- if (!i) {
- src = (const ice_bitmap_t *)ice_ptypes_gtpc;
- ice_and_bitmap(params->ptypes, params->ptypes,
- src, ICE_FLOW_PTYPE_MAX);
- }
+ src = (const ice_bitmap_t *)ice_ptypes_gtpc;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
- if (!i) {
- src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
- ice_and_bitmap(params->ptypes, params->ptypes,
- src, ICE_FLOW_PTYPE_MAX);
- }
- } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU) {
- if (!i) {
- src = (const ice_bitmap_t *)ice_ptypes_gtpu;
- ice_and_bitmap(params->ptypes, params->ptypes,
- src, ICE_FLOW_PTYPE_MAX);
- }
+ src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
+ src = (const ice_bitmap_t *)ice_ptypes_gtpu;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet with downlink */
+ params->attr = ice_attr_gtpu_down;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
+ src = (const ice_bitmap_t *)ice_ptypes_gtpu;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet with uplink */
+ params->attr = ice_attr_gtpu_up;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
+ src = (const ice_bitmap_t *)ice_ptypes_gtpu;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet with Extension Header */
+ params->attr = ice_attr_gtpu_eh;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
+ src = (const ice_bitmap_t *)ice_ptypes_gtpu;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
+ src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
+ src = (const ice_bitmap_t *)ice_ptypes_esp;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
+ src = (const ice_bitmap_t *)ice_ptypes_ah;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
+ src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
+ if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
+ src =
+ (const ice_bitmap_t *)ice_ptypes_pfcp_node;
+ else
+ src =
+ (const ice_bitmap_t *)ice_ptypes_pfcp_session;
+
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ } else {
+ src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
+ ice_andnot_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+
+ src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
+ ice_andnot_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
}
}
* ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
*
* This function will allocate an extraction sequence entries for a DWORD size
* chunk of the packet flags.
* @params: information about the flow to be processed
* @seg: packet segment index of the field to be extracted
* @fld: ID of field to be extracted
+ * @match: bitfield of all fields
*
* This function determines the protocol ID, offset, and size of the given
* field. It then allocates one or more extraction sequence entries for the
*/
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
- u8 seg, enum ice_flow_field fld)
+ u8 seg, enum ice_flow_field fld, u64 match)
{
enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
u8 fv_words = hw->blk[params->blk].es.fvw;
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
- s16 adj = 0;
+ u16 sib_mask = 0;
u16 mask;
u16 off;
case ICE_FLOW_FIELD_IDX_ETH_TYPE:
prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
break;
- case ICE_FLOW_FIELD_IDX_IP_DSCP:
- if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
- adj = ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP;
- /* Fall through */
- case ICE_FLOW_FIELD_IDX_IP_TTL:
- case ICE_FLOW_FIELD_IDX_IP_PROT:
- /* Some fields are located at different offsets in IPv4 and
- * IPv6
+ case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV4_TTL:
+ case ICE_FLOW_FIELD_IDX_IPV4_PROT:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+
+ /* TTL and PROT share the same extraction seq. entry.
+ * Each is considered a sibling to the other in terms of sharing
+ * the same extraction sequence entry.
+ */
+ if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
+ sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
+ else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
+ sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
+
+ /* If the sibling field is also included, that field's
+ * mask needs to be included.
+ */
+ if (match & BIT(sib))
+ sib_mask = ice_flds_info[sib].mask;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_TTL:
+ case ICE_FLOW_FIELD_IDX_IPV6_PROT:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+
+ /* TTL and PROT share the same extraction seq. entry.
+ * Each is considered a sibling to the other in terms of sharing
+ * the same extraction sequence entry.
*/
- if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) {
- prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S :
- ICE_PROT_IPV4_IL;
- /* TTL and PROT share the same extraction seq. entry.
- * Each is considered a sibling to the other in term
- * sharing the same extraction sequence entry.
- */
- if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
- sib = ICE_FLOW_FIELD_IDX_IP_PROT;
- else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
- sib = ICE_FLOW_FIELD_IDX_IP_TTL;
- } else if (params->prof->segs[seg].hdrs &
- ICE_FLOW_SEG_HDR_IPV6) {
- prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S :
- ICE_PROT_IPV6_IL;
- if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
- adj = ICE_FLOW_FLD_IPV6_TTL_TTL_DISP;
- else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
- adj = ICE_FLOW_FLD_IPV6_TTL_PROT_DISP;
- }
+ if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
+ sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
+ else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
+ sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
+
+ /* If the sibling field is also included, that field's
+ * mask needs to be included.
+ */
+ if (match & BIT(sib))
+ sib_mask = ice_flds_info[sib].mask;
break;
case ICE_FLOW_FIELD_IDX_IPV4_SA:
case ICE_FLOW_FIELD_IDX_IPV4_DA:
break;
case ICE_FLOW_FIELD_IDX_IPV6_SA:
case ICE_FLOW_FIELD_IDX_IPV6_DA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
break;
case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
/* GTP is accessed through UDP OF protocol */
prot_id = ICE_PROT_UDP_OF;
break;
case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
prot_id = ICE_PROT_PPPOE;
break;
+ case ICE_FLOW_FIELD_IDX_PFCP_SEID:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
+ case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
+ prot_id = ICE_PROT_L2TPV3;
+ break;
+ case ICE_FLOW_FIELD_IDX_ESP_SPI:
+ prot_id = ICE_PROT_ESP_F;
+ break;
+ case ICE_FLOW_FIELD_IDX_AH_SPI:
+ prot_id = ICE_PROT_ESP_2;
+ break;
+ case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
case ICE_FLOW_FIELD_IDX_ARP_SIP:
case ICE_FLOW_FIELD_IDX_ARP_DIP:
case ICE_FLOW_FIELD_IDX_ARP_SHA:
flds[fld].xtrct.prot_id = prot_id;
flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
ICE_FLOW_FV_EXTRACT_SZ;
- flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
+ flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
flds[fld].xtrct.idx = params->es_cnt;
+ flds[fld].xtrct.mask = ice_flds_info[fld].mask;
/* Adjust the next field-entry index after accommodating the number of
* entries this field consumes
/* Fill in the extraction sequence entries needed for this field */
off = flds[fld].xtrct.off;
- mask = ice_flds_info[fld].mask;
+ mask = flds[fld].xtrct.mask;
for (i = 0; i < cnt; i++) {
/* Only consume an extraction sequence entry if there is no
* sibling field associated with this field or the sibling entry
params->es[idx].prot_id = prot_id;
params->es[idx].off = off;
- params->mask[idx] = mask;
+ params->mask[idx] = mask | sib_mask;
params->es_cnt++;
}
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg)
{
+ u16 fv_words;
u16 hdrs_sz;
u8 i;
if (!hdrs_sz)
return ICE_ERR_PARAM;
+ fv_words = hw->blk[params->blk].es.fvw;
+
for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
struct ice_flow_seg_fld_raw *raw;
u16 off, cnt, j;
raw = ¶ms->prof->segs[seg].raws[i];
- /* Only support matching raw fields in the payload */
- if (raw->off < hdrs_sz)
- return ICE_ERR_PARAM;
-
- /* Convert the segment-relative offset into payload-relative
- * offset.
- */
- off = raw->off - hdrs_sz;
-
/* Storing extraction information */
- raw->info.xtrct.prot_id = ICE_PROT_PAY;
- raw->info.xtrct.off = (off / ICE_FLOW_FV_EXTRACT_SZ) *
+ raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
+ raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
ICE_FLOW_FV_EXTRACT_SZ;
- raw->info.xtrct.disp = (off % ICE_FLOW_FV_EXTRACT_SZ) *
+ raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
BITS_PER_BYTE;
raw->info.xtrct.idx = params->es_cnt;
BITS_PER_BYTE));
off = raw->info.xtrct.off;
for (j = 0; j < cnt; j++) {
+ u16 idx;
+
/* Make sure the number of extraction sequence required
* does not exceed the block's capability
*/
params->es_cnt >= ICE_MAX_FV_WORDS)
return ICE_ERR_MAX_LIMIT;
- params->es[params->es_cnt].prot_id = ICE_PROT_PAY;
- params->es[params->es_cnt].off = off;
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = raw->info.xtrct.prot_id;
+ params->es[idx].off = off;
params->es_cnt++;
off += ICE_FLOW_FV_EXTRACT_SZ;
}
/* For ACL, we also need to extract the direction bit (Rx,Tx) data from
* packet flags
*/
- if (params->blk == ICE_BLK_ACL)
- ice_flow_xtract_pkt_flags(hw, params,
- ICE_RX_MDID_PKT_FLAGS_15_0);
+ if (params->blk == ICE_BLK_ACL) {
+ status = ice_flow_xtract_pkt_flags(hw, params,
+ ICE_RX_MDID_PKT_FLAGS_15_0);
+ if (status)
+ return status;
+ }
for (i = 0; i < params->prof->segs_cnt; i++) {
u64 match = params->prof->segs[i].match;
- u16 j;
+ enum ice_flow_field j;
for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
const u64 bit = BIT_ULL(j);
if (match & bit) {
- status = ice_flow_xtract_fld
- (hw, params, i, (enum ice_flow_field)j);
+ status = ice_flow_xtract_fld(hw, params, i, j,
+ match);
if (status)
return status;
match &= ~bit;
}
/**
- * ice_flow_proc_segs - process all packet segments associated with a profile
- * @hw: pointer to the HW struct
+ * ice_flow_sel_acl_scen - returns the specific scenario
+ * @hw: pointer to the hardware structure
* @params: information about the flow to be processed
+ *
+ * This function will return the specific scenario based on the
+ * params passed to it
*/
static enum ice_status
-ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
+ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
- enum ice_status status = ICE_SUCCESS;
+ /* Find the best-fit scenario for the provided match width */
+ struct ice_acl_scen *cand_scen = NULL, *scen;
- status = ice_flow_proc_seg_hdrs(params);
- if (status)
- return status;
+ /* No ACL table means there are no scenarios to search */
+ if (!hw->acl_tbl)
+ return ICE_ERR_DOES_NOT_EXIST;
- status = ice_flow_create_xtrct_seq(hw, params);
- if (status)
- return status;
+ /* Loop through each scenario and match against the scenario width
+ * to select the specific scenario
+ */
+ LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
+ if (scen->eff_width >= params->entry_length &&
+ (!cand_scen || cand_scen->eff_width > scen->eff_width))
+ cand_scen = scen;
+ /* No scenario is wide enough to hold this profile's entries */
+ if (!cand_scen)
+ return ICE_ERR_DOES_NOT_EXIST;
- switch (params->blk) {
- case ICE_BLK_RSS:
- /* Only header information is provided for RSS configuration.
- * No further processing is needed.
- */
- status = ICE_SUCCESS;
- break;
- case ICE_BLK_FD:
- status = ICE_SUCCESS;
- break;
- case ICE_BLK_SW:
- default:
+ /* Record the narrowest sufficient scenario on the profile */
+ params->prof->cfg.scen = cand_scen;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
+ * @params: information about the flow to be processed
+ *
+ * Assigns every matched field either a byte-selection slot or a range
+ * checker in the ACL entry, and stores the resulting entry byte length
+ * in @params for later scenario selection.
+ */
+static enum ice_status
+ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
+{
+ u16 index, i, range_idx = 0;
+
+ /* First byte-selection slot available for entry data */
+ index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+
+ for (i = 0; i < params->prof->segs_cnt; i++) {
+ struct ice_flow_seg_info *seg = &params->prof->segs[i];
+ u64 match = seg->match;
+ u8 j;
+
+ for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+ struct ice_flow_fld_info *fld;
+ const u64 bit = BIT_ULL(j);
+
+ if (!(match & bit))
+ continue;
+
+ fld = &seg->fields[j];
+ fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
+
+ if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
+ fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
+
+ /* Range checking only supported for single
+ * words
+ */
+ if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
+ fld->xtrct.disp,
+ BITS_PER_BYTE * 2) > 1)
+ return ICE_ERR_PARAM;
+
+ /* Ranges must define low and high values */
+ if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
+ fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
+ return ICE_ERR_PARAM;
+
+ fld->entry.val = range_idx++;
+ } else {
+ /* Store adjusted byte-length of field for later
+ * use, taking into account potential
+ * non-byte-aligned displacement
+ */
+ fld->entry.last = DIVIDE_AND_ROUND_UP
+ (ice_flds_info[j].size +
+ (fld->xtrct.disp % BITS_PER_BYTE),
+ BITS_PER_BYTE);
+ fld->entry.val = index;
+ index += fld->entry.last;
+ }
+
+ match &= ~bit;
+ }
+
+ /* Raw (user-defined) fields follow the named fields */
+ for (j = 0; j < seg->raws_cnt; j++) {
+ struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
+
+ raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
+ raw->info.entry.val = index;
+ raw->info.entry.last = raw->info.src.last;
+ index += raw->info.entry.last;
+ }
+ }
+
+ /* Currently only support using the byte selection base, which only
+ * allows for an effective entry size of 30 bytes. Reject anything
+ * larger.
+ */
+ if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
+ return ICE_ERR_PARAM;
+
+ /* Only 8 range checkers per profile, reject anything trying to use
+ * more
+ */
+ if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
+ return ICE_ERR_PARAM;
+
+ /* Store # bytes required for entry for later use */
+ params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_proc_segs - process all packet segments associated with a profile
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ */
+static enum ice_status
+ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
+{
+ enum ice_status status;
+
+ status = ice_flow_proc_seg_hdrs(params);
+ if (status)
+ return status;
+
+ status = ice_flow_create_xtrct_seq(hw, params);
+ if (status)
+ return status;
+
+ switch (params->blk) {
+ case ICE_BLK_FD:
+ case ICE_BLK_RSS:
+ status = ICE_SUCCESS;
+ break;
+ case ICE_BLK_ACL:
+ status = ice_flow_acl_def_entry_frmt(params);
+ if (status)
+ return status;
+ status = ice_flow_sel_acl_scen(hw, params);
+ if (status)
+ return status;
+ break;
+ default:
return ICE_ERR_NOT_IMPL;
}
struct ice_flow_prof *p, *prof = NULL;
ice_acquire_lock(&hw->fl_profs_locks[blk]);
- LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
segs_cnt && segs_cnt == p->segs_cnt) {
u8 i;
break;
}
}
- }
ice_release_lock(&hw->fl_profs_locks[blk]);
return prof;
{
struct ice_flow_prof *p;
- LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
if (p->id == prof_id)
return p;
- }
return NULL;
}
if (entry->entry)
ice_free(hw, entry->entry);
+ if (entry->range_buf) {
+ ice_free(hw, entry->range_buf);
+ entry->range_buf = NULL;
+ }
+
if (entry->acts) {
ice_free(hw, entry->acts);
entry->acts = NULL;
ice_free(hw, entry);
}
+#define ICE_ACL_INVALID_SCEN 0x3f
+
+/**
+ * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @buf: destination buffer function writes partial extraction sequence to
+ *
+ * returns ICE_SUCCESS if no PF is associated to the given profile
+ * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
+ * returns other error code for real error
+ */
+static enum ice_status
+ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
+ struct ice_aqc_acl_prof_generic_frmt *buf)
+{
+ enum ice_status status;
+ u8 prof_id = 0;
+
+ status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
+ if (status)
+ return status;
+
+ status = ice_query_acl_prof(hw, prof_id, buf, NULL);
+ if (status)
+ return status;
+
+ /* If all PF's associated scenarios are all 0 or all
+ * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
+ * not been configured yet.
+ */
+ /* All-zero scenario numbers: the profile was never programmed */
+ if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
+ buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
+ buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
+ buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
+ return ICE_SUCCESS;
+
+ /* All slots invalid: every PF has since disassociated */
+ if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
+ buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
+ buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
+ buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
+ buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
+ buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
+ buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
+ buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
+ return ICE_SUCCESS;
+ else
+ return ICE_ERR_IN_USE;
+}
+
+/**
+ * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
+ * @hw: pointer to the hardware structure
+ * @acts: array of actions to be performed on a match
+ * @acts_cnt: number of actions
+ */
+static enum ice_status
+ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
+ u8 acts_cnt)
+{
+ int i;
+
+ for (i = 0; i < acts_cnt; i++) {
+ if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
+ acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
+ acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
+ struct ice_acl_cntrs cntrs;
+ enum ice_status status;
+
+ cntrs.bank = 0; /* Only bank0 for the moment */
+ /* Each entry holds exactly one counter, so first == last */
+ cntrs.first_cntr =
+ LE16_TO_CPU(acts[i].data.acl_act.value);
+ cntrs.last_cntr =
+ LE16_TO_CPU(acts[i].data.acl_act.value);
+
+ if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
+ cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
+ else
+ cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
+
+ /* NOTE(review): cntrs.amount is left unset here —
+ * confirm the dealloc path only reads first/last.
+ */
+ status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
+ if (status)
+ return status;
+ }
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ *
+ * Disassociate the scenario from the profile for the PF of the VSI.
+ * Other PFs' scenario associations are left untouched.
+ */
+static enum ice_status
+ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
+{
+ struct ice_aqc_acl_prof_generic_frmt buf;
+ enum ice_status status = ICE_SUCCESS;
+ u8 prof_id = 0;
+
+ ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
+
+ status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
+ if (status)
+ return status;
+
+ /* Read the current per-PF scenario table before modifying it */
+ status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
+ if (status)
+ return status;
+
+ /* Clear scenario for this PF */
+ buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
+ status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
+
+ return status;
+}
+
/**
* ice_flow_rem_entry_sync - Remove a flow entry
* @hw: pointer to the HW struct
+ * @blk: classification stage
* @entry: flow entry to be removed
*/
static enum ice_status
-ice_flow_rem_entry_sync(struct ice_hw *hw, struct ice_flow_entry *entry)
+ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_entry *entry)
{
if (!entry)
return ICE_ERR_BAD_PTR;
+ if (blk == ICE_BLK_ACL) {
+ enum ice_status status;
+
+ if (!entry->prof)
+ return ICE_ERR_BAD_PTR;
+
+ status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
+ entry->scen_entry_idx);
+ if (status)
+ return status;
+
+ /* Checks if we need to release an ACL counter. */
+ if (entry->acts_cnt && entry->acts)
+ ice_flow_acl_free_act_cntr(hw, entry->acts,
+ entry->acts_cnt);
+ }
+
LIST_DEL(&entry->l_entry);
ice_dealloc_flow_entry(hw, entry);
struct ice_flow_prof **prof)
{
struct ice_flow_prof_params params;
- enum ice_status status = ICE_SUCCESS;
+ enum ice_status status;
u8 i;
if (!prof || (acts_cnt && !acts))
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof_with_mask(hw, blk, prof_id, (u8 *)params.ptypes,
- params.es, params.mask);
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
+ params.attr, params.attr_cnt, params.es,
+ params.mask);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof)
{
- enum ice_status status = ICE_SUCCESS;
+ enum ice_status status;
/* Remove all remaining flow entries before removing the flow profile */
if (!LIST_EMPTY(&prof->entries)) {
LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
l_entry) {
- status = ice_flow_rem_entry_sync(hw, e);
+ status = ice_flow_rem_entry_sync(hw, blk, e);
if (status)
break;
}
ice_release_lock(&prof->entries_lock);
}
+ if (blk == ICE_BLK_ACL) {
+ struct ice_aqc_acl_profile_ranges query_rng_buf;
+ struct ice_aqc_acl_prof_generic_frmt buf;
+ u8 prof_id = 0;
+
+ /* Disassociate the scenario from the profile for the PF */
+ status = ice_flow_acl_disassoc_scen(hw, prof);
+ if (status)
+ return status;
+
+ /* Clear the range-checker if the profile ID is no longer
+ * used by any PF
+ */
+ status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
+ if (status && status != ICE_ERR_IN_USE) {
+ return status;
+ } else if (!status) {
+ /* Clear the range-checker value for profile ID */
+ ice_memset(&query_rng_buf, 0,
+ sizeof(struct ice_aqc_acl_profile_ranges),
+ ICE_NONDMA_MEM);
+
+ status = ice_flow_get_hw_prof(hw, blk, prof->id,
+ &prof_id);
+ if (status)
+ return status;
+
+ status = ice_prog_acl_prof_ranges(hw, prof_id,
+ &query_rng_buf, NULL);
+ if (status)
+ return status;
+ }
+ }
+
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
return status;
}
+/**
+ * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
+ * @buf: Destination buffer function writes partial xtrct sequence to
+ * @info: Info about field
+ */
+static void
+ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
+ struct ice_flow_fld_info *info)
+{
+ u16 dst, i;
+ u8 src;
+
+ /* First byte of the field within the extracted field vector */
+ src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
+ info->xtrct.disp / BITS_PER_BYTE;
+ dst = info->entry.val;
+ for (i = 0; i < info->entry.last; i++)
+ /* HW stores field vector words in LE, convert words back to BE
+ * so constructed entries will end up in network order
+ */
+ buf->byte_selection[dst++] = src++ ^ 1;
+}
+
+/**
+ * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ */
+static enum ice_status
+ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
+{
+ struct ice_aqc_acl_prof_generic_frmt buf;
+ struct ice_flow_fld_info *info;
+ enum ice_status status;
+ u8 prof_id = 0;
+ u16 i;
+
+ ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
+
+ status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
+ if (status)
+ return status;
+
+ status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
+ if (status && status != ICE_ERR_IN_USE)
+ return status;
+
+ if (!status) {
+ /* Program the profile dependent configuration. This is done
+ * only once regardless of the number of PFs using that profile
+ */
+ ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
+
+ for (i = 0; i < prof->segs_cnt; i++) {
+ struct ice_flow_seg_info *seg = &prof->segs[i];
+ u64 match = seg->match;
+ u16 j;
+
+ for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+ const u64 bit = BIT_ULL(j);
+
+ if (!(match & bit))
+ continue;
+
+ info = &seg->fields[j];
+
+ /* Range fields select a word, all others
+ * select bytes
+ */
+ if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
+ buf.word_selection[info->entry.val] =
+ info->xtrct.idx;
+ else
+ ice_flow_acl_set_xtrct_seq_fld(&buf,
+ info);
+
+ match &= ~bit;
+ }
+
+ for (j = 0; j < seg->raws_cnt; j++) {
+ info = &seg->raws[j].info;
+ ice_flow_acl_set_xtrct_seq_fld(&buf, info);
+ }
+ }
+
+ /* Mark every PF slot invalid; the current PF is set below */
+ ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
+ ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
+ ICE_NONDMA_MEM);
+ }
+
+ /* Update the current PF */
+ buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
+ status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
+
+ return status;
+}
+
/**
* ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
* @hw: pointer to the hardware structure
enum ice_status status = ICE_SUCCESS;
if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
+ if (blk == ICE_BLK_ACL) {
+ status = ice_flow_acl_set_xtrct_seq(hw, prof);
+ if (status)
+ return status;
+ }
status = ice_add_prof_id_flow(hw, blk,
ice_get_hw_vsi_num(hw,
vsi_handle),
if (!status)
LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
- ice_release_lock(&hw->fl_profs_locks[blk]);
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
+ * @hw: pointer to the HW struct
+ * @blk: the block for which the flow profile is to be removed
+ * @prof_id: unique ID of the flow profile to be removed
+ */
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ /* The per-block lock protects the profile list during lookup/removal */
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto out;
+ }
+
+ /* prof becomes invalid after the call */
+ status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+out:
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: the profile ID handle
+ * @hw_prof_id: pointer to variable to receive the HW profile ID
+ *
+ * Returns ICE_ERR_DOES_NOT_EXIST when @prof_id has no HW profile mapping.
+ */
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u8 *hw_prof_id)
+{
+ struct ice_prof_map *map;
+
+ map = ice_search_prof_id(hw, blk, prof_id);
+ if (map) {
+ *hw_prof_id = map->prof_id;
+ return ICE_SUCCESS;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_flow_find_entry - look for a flow entry using its unique ID
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry_id: unique ID to identify this flow entry
+ *
+ * This function looks for the flow entry with the specified unique ID in all
+ * flow profiles of the specified classification stage. If the entry is found,
+ * it returns the handle to the flow entry. Otherwise, it returns
+ * ICE_FLOW_ENTRY_HANDLE_INVAL.
+ */
+u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
+{
+ struct ice_flow_entry *found = NULL;
+ struct ice_flow_prof *p;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ struct ice_flow_entry *e;
+
+ ice_acquire_lock(&p->entries_lock);
+ LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
+ if (e->id == entry_id) {
+ found = e;
+ break;
+ }
+ ice_release_lock(&p->entries_lock);
+
+ if (found)
+ break;
+ }
+
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
+}
+
+/**
+ * ice_flow_acl_check_actions - Checks the ACL rule's actions
+ * @hw: pointer to the hardware structure
+ * @acts: array of actions to be performed on a match
+ * @acts_cnt: number of actions
+ * @cnt_alloc: indicates if an ACL counter has been allocated.
+ */
+static enum ice_status
+ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
+ u8 acts_cnt, bool *cnt_alloc)
+{
+ ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
+ int i;
+
+ ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
+ *cnt_alloc = false;
+
+ if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ for (i = 0; i < acts_cnt; i++) {
+ if (acts[i].type != ICE_FLOW_ACT_NOP &&
+ acts[i].type != ICE_FLOW_ACT_DROP &&
+ acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
+ acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
+ return ICE_ERR_CFG;
+
+ /* If the caller want to add two actions of the same type, then
+ * it is considered invalid configuration.
+ */
+ if (ice_test_and_set_bit(acts[i].type, dup_check))
+ return ICE_ERR_PARAM;
+ }
+
+ /* Checks if ACL counters are needed. */
+ /* NOTE(review): only ICE_FLOW_ACT_CNTR_PKT survives the type check
+ * above, so the CNTR_BYTES/CNTR_PKT_BYTES branches below appear
+ * unreachable — confirm against the supported action list.
+ */
+ for (i = 0; i < acts_cnt; i++) {
+ if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
+ acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
+ acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
+ struct ice_acl_cntrs cntrs;
+ enum ice_status status;
+
+ cntrs.amount = 1;
+ cntrs.bank = 0; /* Only bank0 for the moment */
+
+ if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
+ cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
+ else
+ cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
+
+ status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
+ if (status)
+ return status;
+ /* Counter index within the bank */
+ acts[i].data.acl_act.value =
+ CPU_TO_LE16(cntrs.first_cntr);
+ *cnt_alloc = true;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
+ * @fld: number of the given field
+ * @info: info about field
+ * @range_buf: range checker configuration buffer
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @range: Input/output param indicating which range checkers are being used
+ */
+static void
+ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
+ struct ice_aqc_acl_profile_ranges *range_buf,
+ u8 *data, u8 *range)
+{
+ u16 new_mask;
+
+ /* If not specified, default mask is all bits in field */
+ /* Shift aligns the value to the field's bit position in the word */
+ new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
+ BIT(ice_flds_info[fld].size) - 1 :
+ (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
+
+ /* If the mask is 0, then we don't need to worry about this input
+ * range checker value.
+ */
+ if (new_mask) {
+ u16 new_high =
+ (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
+ u16 new_low =
+ (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
+ u8 range_idx = info->entry.val;
+
+ /* Boundaries and mask are programmed in big-endian */
+ range_buf->checker_cfg[range_idx].low_boundary =
+ CPU_TO_BE16(new_low);
+ range_buf->checker_cfg[range_idx].high_boundary =
+ CPU_TO_BE16(new_high);
+ range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
+
+ /* Indicate which range checker is being used */
+ *range |= BIT(range_idx);
+ }
+}
+
+/**
+ * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
+ * @fld: number of the given field
+ * @info: info about the field
+ * @buf: buffer containing the entry
+ * @dontcare: buffer containing don't care mask for entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ */
+static void
+ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
+ u8 *dontcare, u8 *data)
+{
+ u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
+ bool use_mask = false;
+ u8 disp;
+
+ src = info->src.val;
+ mask = info->src.mask;
+ dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+ disp = info->xtrct.disp % BITS_PER_BYTE;
+
+ if (mask != ICE_FLOW_FLD_OFF_INVAL)
+ use_mask = true;
+
+ /* A set bit in dontcare marks a key bit the HW should ignore */
+ for (k = 0; k < info->entry.last; k++, dst++) {
+ /* Add overflow bits from previous byte */
+ buf[dst] = (tmp_s & 0xff00) >> 8;
+
+ /* If mask is not valid, tmp_m is always zero, so just setting
+ * dontcare to 0 (no masked bits). If mask is valid, pulls in
+ * overflow bits of mask from prev byte
+ */
+ dontcare[dst] = (tmp_m & 0xff00) >> 8;
+
+ /* If there is displacement, last byte will only contain
+ * displaced data, but there is no more data to read from user
+ * buffer, so skip so as not to potentially read beyond end of
+ * user buffer
+ */
+ if (!disp || k < info->entry.last - 1) {
+ /* Store shifted data to use in next byte */
+ tmp_s = data[src++] << disp;
+
+ /* Add current (shifted) byte */
+ buf[dst] |= tmp_s & 0xff;
+
+ /* Handle mask if valid */
+ if (use_mask) {
+ tmp_m = (~data[mask++] & 0xff) << disp;
+ dontcare[dst] |= tmp_m & 0xff;
+ }
+ }
+ }
+
+ /* Fill in don't care bits at beginning of field */
+ if (disp) {
+ dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+ for (k = 0; k < disp; k++)
+ dontcare[dst] |= BIT(k);
+ }
+
+ end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
+
+ /* Fill in don't care bits at end of field */
+ if (end_disp) {
+ dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
+ info->entry.last - 1;
+ for (k = end_disp; k < BITS_PER_BYTE; k++)
+ dontcare[dst] |= BIT(k);
+ }
+}
+
+/**
+ * ice_flow_acl_frmt_entry - Format ACL entry
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @e: pointer to the flow entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @acts: array of actions to be performed on a match
+ * @acts_cnt: number of actions
+ *
+ * Formats the key (and key_inverse) to be matched from the data passed in,
+ * along with data from the flow profile. This key/key_inverse pair makes up
+ * the 'entry' for an ACL flow entry.
+ */
+static enum ice_status
+ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
+			struct ice_flow_entry *e, u8 *data,
+			struct ice_flow_action *acts, u8 acts_cnt)
+{
+	u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
+	struct ice_aqc_acl_profile_ranges *range_buf = NULL;
+	enum ice_status status;
+	bool cnt_alloc;
+	u8 prof_id = 0;
+	u16 i, buf_sz;
+
+	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
+	if (status)
+		return status;
+
+	/* Format the result action */
+
+	status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
+	if (status)
+		return status;
+
+	status = ICE_ERR_NO_MEMORY;
+
+	e->acts = (struct ice_flow_action *)
+		ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
+			   ICE_NONDMA_TO_NONDMA);
+
+	if (!e->acts)
+		goto out;
+
+	e->acts_cnt = acts_cnt;
+
+	/* Format the matching data */
+	buf_sz = prof->cfg.scen->width;
+	buf = (u8 *)ice_malloc(hw, buf_sz);
+	if (!buf)
+		goto out;
+
+	dontcare = (u8 *)ice_malloc(hw, buf_sz);
+	if (!dontcare)
+		goto out;
+
+	/* 'key' buffer will store both key and key_inverse, so must be twice
+	 * size of buf
+	 */
+	key = (u8 *)ice_malloc(hw, buf_sz * 2);
+	if (!key)
+		goto out;
+
+	range_buf = (struct ice_aqc_acl_profile_ranges *)
+		ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
+	if (!range_buf)
+		goto out;
+
+	/* Set don't care mask to all 1's to start, will zero out used bytes */
+	ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
+
+	for (i = 0; i < prof->segs_cnt; i++) {
+		struct ice_flow_seg_info *seg = &prof->segs[i];
+		u64 match = seg->match;
+		u16 j;
+
+		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+			struct ice_flow_fld_info *info;
+			const u64 bit = BIT_ULL(j);
+
+			if (!(match & bit))
+				continue;
+
+			info = &seg->fields[j];
+
+			if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
+				ice_flow_acl_frmt_entry_range(j, info,
+							      range_buf, data,
+							      &range);
+			else
+				ice_flow_acl_frmt_entry_fld(j, info, buf,
+							    dontcare, data);
+
+			match &= ~bit;
+		}
+
+		for (j = 0; j < seg->raws_cnt; j++) {
+			struct ice_flow_fld_info *info = &seg->raws[j].info;
+			u16 dst, src, mask, k;
+			bool use_mask = false;
+
+			src = info->src.val;
+			dst = info->entry.val -
+				ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
+			mask = info->src.mask;
+
+			if (mask != ICE_FLOW_FLD_OFF_INVAL)
+				use_mask = true;
+
+			for (k = 0; k < info->entry.last; k++, dst++) {
+				buf[dst] = data[src++];
+				if (use_mask)
+					dontcare[dst] = ~data[mask++];
+				else
+					dontcare[dst] = 0;
+			}
+		}
+	}
+
+	buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
+	dontcare[prof->cfg.scen->pid_idx] = 0;
+
+	/* Format the buffer for direction flags */
+	dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
+
+	if (prof->dir == ICE_FLOW_RX)
+		buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
+
+	if (range) {
+		buf[prof->cfg.scen->rng_chk_idx] = range;
+		/* Mark any unused range checkers as don't care */
+		dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
+		e->range_buf = range_buf;
+	} else {
+		ice_free(hw, range_buf);
+		/* NULL the local pointer so the error path at 'out' cannot
+		 * free the range buffer a second time (double free) when a
+		 * later step such as ice_set_key fails.
+		 */
+		range_buf = NULL;
+	}
+
+	status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
+			     buf_sz);
+	if (status)
+		goto out;
+
+	e->entry = key;
+	e->entry_sz = buf_sz * 2;
+
+out:
+	if (buf)
+		ice_free(hw, buf);
+
+	if (dontcare)
+		ice_free(hw, dontcare);
+
+	if (status && key)
+		ice_free(hw, key);
+
+	if (status && range_buf) {
+		ice_free(hw, range_buf);
+		e->range_buf = NULL;
+	}
+
+	if (status && e->acts) {
+		ice_free(hw, e->acts);
+		e->acts = NULL;
+		e->acts_cnt = 0;
+	}
+
+	if (status && cnt_alloc)
+		ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
 	return status;
 }
 /**
- * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
- * @hw: pointer to the HW struct
- * @blk: the block for which the flow profile is to be removed
- * @prof_id: unique ID of the flow profile to be removed
+ * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
+ *                                     the compared data.
+ * @prof: pointer to flow profile
+ * @e: pointer to the comparing flow entry
+ * @do_chg_action: decide if we want to change the ACL action
+ * @do_add_entry: decide if we want to add the new ACL entry
+ * @do_rem_entry: decide if we want to remove the current ACL entry
+ *
+ * Find an ACL scenario entry that matches the compared data. In the same time,
+ * this function also figure out:
+ * a/ If we want to change the ACL action
+ * b/ If we want to add the new ACL entry
+ * c/ If we want to remove the current ACL entry
  */
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+static struct ice_flow_entry *
+ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
+				  struct ice_flow_entry *e, bool *do_chg_action,
+				  bool *do_add_entry, bool *do_rem_entry)
 {
-	struct ice_flow_prof *prof;
-	enum ice_status status;
+	struct ice_flow_entry *p, *return_entry = NULL;
+	u8 i, j;
+
+	/* Check if:
+	 * a/ There exists an entry with same matching data, but different
+	 *    priority, then we remove this existing ACL entry. Then, we
+	 *    will add the new entry to the ACL scenario.
+	 * b/ There exists an entry with same matching data, priority, and
+	 *    result action, then we do nothing
+	 * c/ There exists an entry with same matching data, priority, but
+	 *    different action, then only change the entry's action.
+	 * d/ Else, we add this new entry to the ACL scenario.
+	 */
+	*do_chg_action = false;
+	*do_add_entry = true;
+	*do_rem_entry = false;
+	LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
+		/* Formatted keys are compared byte-for-byte */
+		if (memcmp(p->entry, e->entry, p->entry_sz))
+			continue;
+
+		/* From this point, we have the same matching_data. */
+		*do_add_entry = false;
+		return_entry = p;
+
+		if (p->priority != e->priority) {
+			/* matching data && !priority */
+			*do_add_entry = true;
+			*do_rem_entry = true;
+			break;
+		}
-	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+		/* From this point, we will have matching_data && priority */
+		if (p->acts_cnt != e->acts_cnt)
+			*do_chg_action = true;
+		/* NOTE(review): the inner scan below breaks on the FIRST
+		 * differing pair, so p->acts[i] is effectively required to
+		 * equal every e->acts[j] it is compared against; equal action
+		 * lists in a different order still set do_chg_action —
+		 * confirm this is the intended semantic.
+		 */
+		for (i = 0; i < p->acts_cnt; i++) {
+			bool found_not_match = false;
-	prof = ice_flow_find_prof_id(hw, blk, prof_id);
-	if (!prof) {
-		status = ICE_ERR_DOES_NOT_EXIST;
-		goto out;
-	}
+			for (j = 0; j < e->acts_cnt; j++)
+				if (memcmp(&p->acts[i], &e->acts[j],
+					   sizeof(struct ice_flow_action))) {
+					found_not_match = true;
+					break;
+				}
-	/* prof becomes invalid after the call */
-	status = ice_flow_rem_prof_sync(hw, blk, prof);
+			if (found_not_match) {
+				*do_chg_action = true;
+				break;
+			}
+		}
-out:
-	ice_release_lock(&hw->fl_profs_locks[blk]);
+		/* (do_chg_action = true) means :
+		 * matching_data && priority && !result_action
+		 * (do_chg_action = false) means :
+		 * matching_data && priority && result_action
+		 */
+		break;
+	}
-	return status;
+	return return_entry;
 }
 /**
- * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @prof_id: the profile ID handle
- * @hw_prof_id: pointer to variable to receive the HW profile ID
+ * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
+ * @p: flow priority
  */
-enum ice_status
-ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
-		     u8 *hw_prof_id)
+static enum ice_acl_entry_prior
+ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
 {
-	struct ice_prof_map *map;
+	enum ice_acl_entry_prior acl_prior;
-	map = ice_search_prof_id(hw, blk, prof_id);
-	if (map) {
-		*hw_prof_id = map->prof_id;
-		return ICE_SUCCESS;
+	switch (p) {
+	case ICE_FLOW_PRIO_LOW:
+		acl_prior = ICE_LOW;
+		break;
+	case ICE_FLOW_PRIO_NORMAL:
+		acl_prior = ICE_NORMAL;
+		break;
+	case ICE_FLOW_PRIO_HIGH:
+		acl_prior = ICE_HIGH;
+		break;
+	default:
+		/* Unrecognized flow priorities fall back to normal */
+		acl_prior = ICE_NORMAL;
+		break;
 	}
-	return ICE_ERR_DOES_NOT_EXIST;
+	return acl_prior;
 }
 /**
- * ice_flow_find_entry - look for a flow entry using its unique ID
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @entry_id: unique ID to identify this flow entry
+ * ice_flow_acl_union_rng_chk - Perform union operation between two
+ *                              range checker buffers
+ * @dst_buf: pointer to destination range checker buffer
+ * @src_buf: pointer to source range checker buffer
  *
- * This function looks for the flow entry with the specified unique ID in all
- * flow profiles of the specified classification stage. If the entry is found,
- * and it returns the handle to the flow entry. Otherwise, it returns
- * ICE_FLOW_ENTRY_ID_INVAL.
+ * For this function, we do the union between dst_buf and src_buf
+ * range checker buffer, and we will save the result back to dst_buf
  */
-u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
+static enum ice_status
+ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
+			   struct ice_aqc_acl_profile_ranges *src_buf)
 {
-	struct ice_flow_entry *found = NULL;
-	struct ice_flow_prof *p;
+	u8 i, j;
-	ice_acquire_lock(&hw->fl_profs_locks[blk]);
+	if (!dst_buf || !src_buf)
+		return ICE_ERR_BAD_PTR;
-	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
-		struct ice_flow_entry *e;
+	for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
+		struct ice_acl_rng_data *cfg_data = NULL, *in_data;
+		bool will_populate = false;
-		ice_acquire_lock(&p->entries_lock);
-		LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
-			if (e->id == entry_id) {
-				found = e;
+		in_data = &src_buf->checker_cfg[i];
+
+		/* A zero mask means this source slot (and any after it) holds
+		 * no configured range checker; stop scanning src_buf.
+		 */
+		if (!in_data->mask)
+			break;
+
+		/* Pick the first free destination slot, or a slot that
+		 * already holds an identical checker configuration.
+		 */
+		for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
+			cfg_data = &dst_buf->checker_cfg[j];
+
+			if (!cfg_data->mask ||
+			    !memcmp(cfg_data, in_data,
+				    sizeof(struct ice_acl_rng_data))) {
+				will_populate = true;
 				break;
 			}
-		ice_release_lock(&p->entries_lock);
+		}
-		if (found)
-			break;
+		if (will_populate) {
+			ice_memcpy(cfg_data, in_data,
+				   sizeof(struct ice_acl_rng_data),
+				   ICE_NONDMA_TO_NONDMA);
+		} else {
+			/* No available slot left to program range checker */
+			return ICE_ERR_MAX_LIMIT;
+		}
 	}
-	ice_release_lock(&hw->fl_profs_locks[blk]);
+	return ICE_SUCCESS;
+}
- return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
+/**
+ * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @entry: double pointer to the flow entry
+ *
+ * For this function, we will look at the current added entries in the
+ * corresponding ACL scenario. Then, we will perform matching logic to
+ * see if we want to add/modify/do nothing with this new entry.
+ */
+static enum ice_status
+ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
+				 struct ice_flow_entry **entry)
+{
+	bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
+	struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
+	struct ice_acl_act_entry *acts = NULL;
+	struct ice_flow_entry *exist;
+	enum ice_status status = ICE_SUCCESS;
+	struct ice_flow_entry *e;
+	u8 i;
+
+	if (!entry || !(*entry) || !prof)
+		return ICE_ERR_BAD_PTR;
+
+	e = *(entry);
+
+	do_chg_rng_chk = false;
+	if (e->range_buf) {
+		u8 prof_id = 0;
+
+		status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
+					      &prof_id);
+		if (status)
+			return status;
+
+		/* Query the current range-checker value in FW */
+		status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
+						   NULL);
+		if (status)
+			return status;
+		ice_memcpy(&cfg_rng_buf, &query_rng_buf,
+			   sizeof(struct ice_aqc_acl_profile_ranges),
+			   ICE_NONDMA_TO_NONDMA);
+
+		/* Generate the new range-checker value */
+		status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
+		if (status)
+			return status;
+
+		/* Reconfigure the range check if the buffer is changed. */
+		do_chg_rng_chk = false;
+		if (memcmp(&query_rng_buf, &cfg_rng_buf,
+			   sizeof(struct ice_aqc_acl_profile_ranges))) {
+			status = ice_prog_acl_prof_ranges(hw, prof_id,
+							  &cfg_rng_buf, NULL);
+			if (status)
+				return status;
+
+			do_chg_rng_chk = true;
+		}
+	}
+
+	/* Figure out if we want to (change the ACL action) and/or
+	 * (Add the new ACL entry) and/or (Remove the current ACL entry)
+	 */
+	exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
+						  &do_add_entry, &do_rem_entry);
+
+	if (do_rem_entry) {
+		status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
+		if (status)
+			return status;
+	}
+
+	/* Prepare the result action buffer.
+	 * NOTE(review): sized by e->entry_sz rather than e->acts_cnt —
+	 * confirm the larger allocation is intentional.
+	 */
+	acts = (struct ice_acl_act_entry *)ice_calloc
+		(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
+	if (!acts)
+		return ICE_ERR_NO_MEMORY;
+
+	for (i = 0; i < e->acts_cnt; i++)
+		ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
+			   sizeof(struct ice_acl_act_entry),
+			   ICE_NONDMA_TO_NONDMA);
+
+	if (do_add_entry) {
+		enum ice_acl_entry_prior prior;
+		u8 *keys, *inverts;
+		u16 entry_idx;
+
+		keys = (u8 *)e->entry;
+		inverts = keys + (e->entry_sz / 2);
+		prior = ice_flow_acl_convert_to_acl_prior(e->priority);
+
+		status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
+					   inverts, acts, e->acts_cnt,
+					   &entry_idx);
+		if (status)
+			goto out;
+
+		e->scen_entry_idx = entry_idx;
+		LIST_ADD(&e->l_entry, &prof->entries);
+	} else {
+		if (do_chg_action) {
+			/* For the action memory info, update the SW's copy of
+			 * exist entry with e's action memory info
+			 */
+			ice_free(hw, exist->acts);
+			exist->acts_cnt = e->acts_cnt;
+			exist->acts = (struct ice_flow_action *)
+				ice_calloc(hw, exist->acts_cnt,
+					   sizeof(struct ice_flow_action));
+
+			if (!exist->acts) {
+				status = ICE_ERR_NO_MEMORY;
+				goto out;
+			}
+
+			ice_memcpy(exist->acts, e->acts,
+				   sizeof(struct ice_flow_action) * e->acts_cnt,
+				   ICE_NONDMA_TO_NONDMA);
+
+			status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
+						  e->acts_cnt,
+						  exist->scen_entry_idx);
+			if (status)
+				goto out;
+		}
+
+		if (do_chg_rng_chk) {
+			/* In this case, we want to update the range checker
+			 * information of the exist entry
+			 */
+			status = ice_flow_acl_union_rng_chk(exist->range_buf,
+							    e->range_buf);
+			if (status)
+				goto out;
+		}
+
+		/* As we don't add the new entry to our SW DB, deallocate its
+		 * memories, and return the exist entry to the caller
+		 */
+		ice_dealloc_flow_entry(hw, e);
+		*(entry) = exist;
+	}
+out:
+	if (acts)
+		ice_free(hw, acts);
+
+	return status;
+}
+
+/**
+ * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ * @e: double pointer to the flow entry
+ *
+ * Thin wrapper that serializes the scenario-entry add under the profile's
+ * entries lock; when the sync routine merges with an existing entry, *e is
+ * redirected to that entry.
+ */
+static enum ice_status
+ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
+			    struct ice_flow_entry **e)
+{
+	enum ice_status status;
+
+	ice_acquire_lock(&prof->entries_lock);
+	status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
+	ice_release_lock(&prof->entries_lock);
+
+	return status;
+}
/**
void *data, struct ice_flow_action *acts, u8 acts_cnt,
u64 *entry_h)
{
- struct ice_flow_prof *prof = NULL;
struct ice_flow_entry *e = NULL;
+ struct ice_flow_prof *prof;
enum ice_status status = ICE_SUCCESS;
- if (acts_cnt && !acts)
+ /* ACL entries must indicate an action */
+ if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
return ICE_ERR_PARAM;
/* No flow entry data is expected for RSS */
e->priority = prio;
switch (blk) {
+ case ICE_BLK_FD:
case ICE_BLK_RSS:
- /* RSS will add only one entry per VSI per profile */
break;
- case ICE_BLK_FD:
+ case ICE_BLK_ACL:
+ /* ACL will handle the entry management */
+ status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
+ acts_cnt);
+ if (status)
+ goto out;
+
+ status = ice_flow_acl_add_scen_entry(hw, prof, &e);
+ if (status)
+ goto out;
+
break;
- case ICE_BLK_SW:
- case ICE_BLK_PE:
default:
status = ICE_ERR_NOT_IMPL;
goto out;
/**
* ice_flow_rem_entry - Remove a flow entry
* @hw: pointer to the HW struct
+ * @blk: classification stage
* @entry_h: handle to the flow entry to be removed
*/
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h)
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
+ u64 entry_h)
{
struct ice_flow_entry *entry;
struct ice_flow_prof *prof;
- enum ice_status status;
+ enum ice_status status = ICE_SUCCESS;
if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
return ICE_ERR_PARAM;
/* Retain the pointer to the flow profile as the entry will be freed */
prof = entry->prof;
- ice_acquire_lock(&prof->entries_lock);
- status = ice_flow_rem_entry_sync(hw, entry);
- ice_release_lock(&prof->entries_lock);
+ if (prof) {
+ ice_acquire_lock(&prof->entries_lock);
+ status = ice_flow_rem_entry_sync(hw, blk, entry);
+ ice_release_lock(&prof->entries_lock);
+ }
return status;
}
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
* @seg: packet segment the field being set belongs to
* @fld: field to be set
- * @type: type of the field
+ * @field_type: type of the field
* @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
* entry's input buffer
* @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
*/
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
- enum ice_flow_fld_match_type type, u16 val_loc,
+ enum ice_flow_fld_match_type field_type, u16 val_loc,
u16 mask_loc, u16 last_loc)
{
u64 bit = BIT_ULL(fld);
seg->match |= bit;
- if (type == ICE_FLOW_FLD_TYPE_RANGE)
+ if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
seg->range |= bit;
- seg->fields[fld].type = type;
+ seg->fields[fld].type = field_type;
seg->fields[fld].src.val = val_loc;
seg->fields[fld].src.mask = mask_loc;
seg->fields[fld].src.last = last_loc;
seg->raws_cnt++;
}
+#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
+(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
+
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
- (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_PPPOE)
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
- ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_GTPC_TEID)
-
+ ICE_FLOW_SEG_HDR_SCTP)
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
- (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
+ (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
+ ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
/**
}
ICE_FLOW_SET_HDRS(segs, flow_hdr);
- if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
+ if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
+ ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return ICE_ERR_PARAM;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
- if (!ice_is_pow2(val))
+ if (val && !ice_is_pow2(val))
return ICE_ERR_CFG;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
ice_acquire_lock(&hw->rss_locks);
LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
- ice_rss_cfg, l_entry) {
- if (ice_is_bit_set(r->vsis, vsi_handle)) {
- ice_clear_bit(vsi_handle, r->vsis);
-
+ ice_rss_cfg, l_entry)
+ if (ice_test_and_clear_bit(vsi_handle, r->vsis))
if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
LIST_DEL(&r->l_entry);
ice_free(hw, r);
}
- }
- }
ice_release_lock(&hw->rss_locks);
}
ice_acquire_lock(&hw->fl_profs_locks[blk]);
LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
- l_entry) {
+ l_entry)
if (ice_is_bit_set(p->vsis, vsi_handle)) {
status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
if (status)
break;
}
}
- }
ice_release_lock(&hw->fl_profs_locks[blk]);
return status;
* remove from the RSS entry list of the VSI context and delete entry.
*/
LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
- ice_rss_cfg, l_entry) {
+ ice_rss_cfg, l_entry)
if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
ice_clear_bit(vsi_handle, r->vsis);
}
return;
}
- }
}
/**
rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+ rss_cfg->symm = prof->cfg.symm;
ice_set_bit(vsi_handle, rss_cfg->vsis);
LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
#define ICE_FLOW_PROF_HASH_S 0
#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S 32
-#define ICE_FLOW_PROF_HDR_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HDR_S)
+#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
+#define ICE_FLOW_PROF_ENCAP_S 63
+#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+
+#define ICE_RSS_OUTER_HEADERS 1
+#define ICE_RSS_INNER_HEADERS 2
-#define ICE_FLOW_GEN_PROFID(hash, hdr) \
+/* Flow profile ID format:
+ * [0:31] - Packet match fields
+ * [32:62] - Protocol header
+ * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+ */
+#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
- (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M))
+ (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
+ ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
+
+/**
+ * ice_rss_config_xor_word - program one GLQF_HSYMM byte for symmetric RSS
+ * @hw: pointer to the HW struct
+ * @prof_id: hardware profile ID
+ * @src: field-vector word whose HSYMM byte is written
+ * @dst: field-vector word to pair @src with
+ *
+ * Read-modify-writes GLQF_HSYMM(prof_id, src / 4), replacing the byte
+ * selected by (src % 4) with (dst | 0x80); the 0x80 bit presumably enables
+ * the symmetric mapping for that word — confirm against the HW spec.
+ */
+static void
+ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
+{
+	u32 s = ((src % 4) << 3); /* byte shift */
+	u32 v = dst | 0x80; /* value to program */
+	u8 i = src / 4; /* register index */
+	u32 reg;
+
+	reg = rd32(hw, GLQF_HSYMM(prof_id, i));
+	reg = (reg & ~(0xff << s)) | (v << s);
+	wr32(hw, GLQF_HSYMM(prof_id, i), reg);
+}
+
+/**
+ * ice_rss_config_xor - pair two extracted fields for symmetric hashing
+ * @hw: pointer to the HW struct
+ * @prof_id: hardware profile ID
+ * @src: first field's starting word index in the field vector
+ * @dst: second field's starting word index in the field vector
+ * @len: field length in field-vector words
+ *
+ * Programs GLQF_HSYMM in both directions (src->dst and dst->src) for each
+ * word of the field. Indices are mirrored against the last field-vector
+ * word because the field vector in GLQF_HSYMM is inversed.
+ */
+static void
+ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
+{
+	int fv_last_word =
+		ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
+	int i;
+
+	for (i = 0; i < len; i++) {
+		ice_rss_config_xor_word(hw, prof_id,
+					/* Yes, field vector in GLQF_HSYMM and
+					 * GLQF_HINSET is inversed!
+					 */
+					fv_last_word - (src + i),
+					fv_last_word - (dst + i));
+		ice_rss_config_xor_word(hw, prof_id,
+					fv_last_word - (dst + i),
+					fv_last_word - (src + i));
+	}
+}
+
+/**
+ * ice_rss_update_symm - apply a profile's symmetric-hash XOR configuration
+ * @hw: pointer to the HW struct
+ * @prof: RSS flow profile whose symmetric setting is applied
+ *
+ * Resets the profile's GLQF_HSYMM words and, when symmetric hashing is
+ * enabled, programs XOR pairs for each src/dst field (IPv4/IPv6 addresses,
+ * TCP/UDP/SCTP ports) that was extracted for the profile's last segment
+ * (non-zero xtrct.prot_id indicates the field was extracted).
+ */
+static void
+ice_rss_update_symm(struct ice_hw *hw,
+		    struct ice_flow_prof *prof)
+{
+	struct ice_prof_map *map;
+	u8 prof_id, m;
+
+	map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
+	/* The profile may not be mapped to a HW profile ID; guard against a
+	 * NULL dereference (ice_search_prof_id can return NULL).
+	 */
+	if (!map)
+		return;
+	prof_id = map->prof_id;
+
+	/* clear to default */
+	for (m = 0; m < 6; m++)
+		wr32(hw, GLQF_HSYMM(prof_id, m), 0);
+	if (prof->cfg.symm) {
+		struct ice_flow_seg_info *seg =
+			&prof->segs[prof->segs_cnt - 1];
+
+		struct ice_flow_seg_xtrct *ipv4_src =
+			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
+		struct ice_flow_seg_xtrct *ipv4_dst =
+			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
+		struct ice_flow_seg_xtrct *ipv6_src =
+			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
+		struct ice_flow_seg_xtrct *ipv6_dst =
+			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
+
+		struct ice_flow_seg_xtrct *tcp_src =
+			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
+		struct ice_flow_seg_xtrct *tcp_dst =
+			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
+
+		struct ice_flow_seg_xtrct *udp_src =
+			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
+		struct ice_flow_seg_xtrct *udp_dst =
+			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
+
+		struct ice_flow_seg_xtrct *sctp_src =
+			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
+		struct ice_flow_seg_xtrct *sctp_dst =
+			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
+
+		/* xor IPv4 */
+		if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
+			ice_rss_config_xor(hw, prof_id,
+					   ipv4_src->idx, ipv4_dst->idx, 2);
+
+		/* xor IPv6 */
+		if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
+			ice_rss_config_xor(hw, prof_id,
+					   ipv6_src->idx, ipv6_dst->idx, 8);
+
+		/* xor TCP */
+		if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
+			ice_rss_config_xor(hw, prof_id,
+					   tcp_src->idx, tcp_dst->idx, 1);
+
+		/* xor UDP */
+		if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
+			ice_rss_config_xor(hw, prof_id,
+					   udp_src->idx, udp_dst->idx, 1);
+
+		/* xor SCTP */
+		if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
+			ice_rss_config_xor(hw, prof_id,
+					   sctp_src->idx, sctp_dst->idx, 1);
+	}
+}
/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @vsi_handle: software VSI handle
* @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
* @addl_hdrs: protocol header fields
+ * @segs_cnt: packet segment count
+ * @symm: symmetric hash enable/disable
*
* Assumption: lock has already been acquired for RSS list
*/
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+ u32 addl_hdrs, u8 segs_cnt, bool symm)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
- enum ice_status status = ICE_SUCCESS;
+ enum ice_status status;
+
+ if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
+ return ICE_ERR_PARAM;
- segs = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*segs));
+ segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
+ sizeof(*segs));
if (!segs)
return ICE_ERR_NO_MEMORY;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(segs, hashed_flds, addl_hdrs);
+ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
+ addl_hdrs);
if (status)
goto exit;
* and has the input VSI associated to it. If found, no further
* operations required and exit.
*/
- prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS |
ICE_FLOW_FIND_PROF_CHK_VSI);
- if (prof)
- goto exit;
+ if (prof) {
+ if (prof->cfg.symm == symm)
+ goto exit;
+ prof->cfg.symm = symm;
+ goto update_symm;
+ }
/* Check if a flow profile exists with the same protocol headers and
- * associated with the input VSI. If so disasscociate the VSI from
+ * associated with the input VSI. If so disassociate the VSI from
* this profile. The VSI will be added to a new profile created with
* the protocol header and new hash field configuration.
*/
- prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof) {
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
/* Search for a profile that has same match fields only. If this
* exists then associate the VSI to this profile.
*/
- prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (prof) {
- status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
- if (!status)
- status = ice_add_rss_list(hw, vsi_handle, prof);
+ if (prof->cfg.symm == symm) {
+ status = ice_flow_assoc_prof(hw, blk, prof,
+ vsi_handle);
+ if (!status)
+ status = ice_add_rss_list(hw, vsi_handle,
+ prof);
+ } else {
+ /* if a profile exist but with different symmetric
+ * requirement, just return error.
+ */
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
goto exit;
}
* segment information.
*/
status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
- ICE_FLOW_GEN_PROFID(hashed_flds, segs->hdrs),
- segs, 1, NULL, 0, &prof);
+ ICE_FLOW_GEN_PROFID(hashed_flds,
+ segs[segs_cnt - 1].hdrs,
+ segs_cnt),
+ segs, segs_cnt, NULL, 0, &prof);
if (status)
goto exit;
status = ice_add_rss_list(hw, vsi_handle, prof);
+ prof->cfg.symm = symm;
+
+update_symm:
+ ice_rss_update_symm(hw, prof);
+
exit:
ice_free(hw, segs);
return status;
* @vsi_handle: software VSI handle
* @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
* @addl_hdrs: protocol header fields
+ * @symm: symmetric hash enable/disable
*
* This function will generate a flow profile based on fields associated with
* the input fields to hash on, the flow type and use the VSI number to add
*/
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+ u32 addl_hdrs, bool symm)
{
enum ice_status status;
return ICE_ERR_PARAM;
ice_acquire_lock(&hw->rss_locks);
- status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs);
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
+ ICE_RSS_OUTER_HEADERS, symm);
+ if (!status)
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+ addl_hdrs, ICE_RSS_INNER_HEADERS,
+ symm);
ice_release_lock(&hw->rss_locks);
return status;
* @vsi_handle: software VSI handle
* @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
* @addl_hdrs: Protocol header fields within a packet segment
+ * @segs_cnt: packet segment count
*
* Assumption: lock has already been acquired for RSS list
*/
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+ u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
enum ice_status status;
- segs = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*segs));
+ segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
+ sizeof(*segs));
if (!segs)
return ICE_ERR_NO_MEMORY;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(segs, hashed_flds, addl_hdrs);
+ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
+ addl_hdrs);
if (status)
goto out;
- prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
return status;
}
-/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
- * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
- * convert its values to their appropriate flow L3, L4 values.
- */
-#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
-#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
-#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
-#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
- (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
- ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
-
-#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
-#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
-#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
-#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
- (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
- ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
-
-#define ICE_FLOW_MAX_CFG 10
-
-/**
- * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
- * @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
- * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
- *
- * This function will take the hash bitmap provided by the AVF driver via a
- * message, convert it to ICE-compatible values, and configure RSS flow
- * profiles.
- */
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
-{
- enum ice_status status = ICE_SUCCESS;
- u64 hash_flds;
-
- if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
-
- /* Make sure no unsupported bits are specified */
- if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
- ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
- return ICE_ERR_CFG;
-
- hash_flds = avf_hash;
-
- /* Always create an L3 RSS configuration for any L4 RSS configuration */
- if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
- hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
-
- if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
- hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
-
- /* Create the corresponding RSS configuration for each valid hash bit */
- while (hash_flds) {
- u64 rss_hash = ICE_HASH_INVALID;
-
- if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
- if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
- rss_hash = ICE_FLOW_HASH_IPV4;
- hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
- } else if (hash_flds &
- ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
- rss_hash = ICE_FLOW_HASH_IPV4 |
- ICE_FLOW_HASH_TCP_PORT;
- hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
- } else if (hash_flds &
- ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
- rss_hash = ICE_FLOW_HASH_IPV4 |
- ICE_FLOW_HASH_UDP_PORT;
- hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
- } else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
- rss_hash = ICE_FLOW_HASH_IPV4 |
- ICE_FLOW_HASH_SCTP_PORT;
- hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
- }
- } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
- if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
- rss_hash = ICE_FLOW_HASH_IPV6;
- hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
- } else if (hash_flds &
- ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
- rss_hash = ICE_FLOW_HASH_IPV6 |
- ICE_FLOW_HASH_TCP_PORT;
- hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
- } else if (hash_flds &
- ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
- rss_hash = ICE_FLOW_HASH_IPV6 |
- ICE_FLOW_HASH_UDP_PORT;
- hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
- } else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
- rss_hash = ICE_FLOW_HASH_IPV6 |
- ICE_FLOW_HASH_SCTP_PORT;
- hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
- }
- }
-
- if (rss_hash == ICE_HASH_INVALID)
- return ICE_ERR_OUT_OF_RANGE;
-
- status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
- ICE_FLOW_SEG_HDR_NONE);
- if (status)
- break;
- }
-
- return status;
-}
-
/**
* ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
* @hw: pointer to the hardware structure
return ICE_ERR_PARAM;
ice_acquire_lock(&hw->rss_locks);
- status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs);
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
+ ICE_RSS_OUTER_HEADERS);
+ if (!status)
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+ addl_hdrs, ICE_RSS_INNER_HEADERS);
ice_release_lock(&hw->rss_locks);
return status;
if (ice_is_bit_set(r->vsis, vsi_handle)) {
status = ice_add_rss_cfg_sync(hw, vsi_handle,
r->hashed_flds,
- r->packet_hdr);
+ r->packet_hdr,
+ ICE_RSS_OUTER_HEADERS,
+ r->symm);
+ if (status)
+ break;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ r->hashed_flds,
+ r->packet_hdr,
+ ICE_RSS_INNER_HEADERS,
+ r->symm);
if (status)
break;
}