/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */
#include "ice_common.h"
/* Size of known protocol header fields, in bytes.  These feed the
 * ICE_FLOW_FLD_INFO() macros below, which convert them to bit widths.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IPV4_ID		2
#define ICE_FLOW_FLD_SZ_IPV6_ID		4
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI	4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID	2
38 /* Describe properties of a protocol header field */
39 struct ice_flow_field_info {
40 enum ice_flow_seg_hdr hdr;
41 s16 off; /* Offset from start of a protocol header, in bits */
42 u16 size; /* Size of fields in bits */
43 u16 mask; /* 16-bit mask for field */
/* Build an ice_flow_field_info initializer with no mask (mask stays 0
 * via designated-initializer zeroing of unmentioned members).
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
/* Build an ice_flow_field_info initializer with an explicit 16-bit mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
60 /* Table containing properties of supported protocol header fields */
62 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
64 /* ICE_FLOW_FIELD_IDX_ETH_DA */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
66 /* ICE_FLOW_FIELD_IDX_ETH_SA */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
68 /* ICE_FLOW_FIELD_IDX_S_VLAN */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
70 /* ICE_FLOW_FIELD_IDX_C_VLAN */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
72 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
73 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
75 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
76 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
78 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
79 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
81 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
82 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
83 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
84 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
85 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
86 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
87 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
88 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
89 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
90 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
91 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
92 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
93 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
101 /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
103 ICE_FLOW_FLD_SZ_IPV4_ID),
104 /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
105 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
106 ICE_FLOW_FLD_SZ_IPV6_ID),
107 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
109 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
110 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
112 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
113 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
116 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
119 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
120 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
122 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
126 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
130 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
131 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
132 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
134 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
135 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
136 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
138 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
139 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
141 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
143 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
144 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
145 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
146 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
147 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
148 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
149 /* ICE_FLOW_FIELD_IDX_ARP_OP */
150 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
152 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
154 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
155 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
157 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
158 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
160 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
162 ICE_FLOW_FLD_SZ_GTP_TEID),
163 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
164 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
165 ICE_FLOW_FLD_SZ_GTP_TEID),
166 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
167 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
168 ICE_FLOW_FLD_SZ_GTP_TEID),
169 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
170 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
171 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
172 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
173 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
174 ICE_FLOW_FLD_SZ_GTP_TEID),
175 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
176 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
177 ICE_FLOW_FLD_SZ_GTP_TEID),
179 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
181 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
183 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
184 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
185 ICE_FLOW_FLD_SZ_PFCP_SEID),
187 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
188 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
189 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
191 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
192 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
193 ICE_FLOW_FLD_SZ_ESP_SPI),
195 /* ICE_FLOW_FIELD_IDX_AH_SPI */
196 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
197 ICE_FLOW_FLD_SZ_AH_SPI),
199 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
200 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
201 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
202 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
203 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
204 ICE_FLOW_FLD_SZ_VXLAN_VNI),
206 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
207 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
208 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
210 /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
211 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
212 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
215 /* Bitmaps indicating relevant packet types for a particular protocol header
217 * Packet types for packets with an Outer/First/Single MAC header
219 static const u32 ice_ptypes_mac_ofos[] = {
220 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
221 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
222 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
223 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
224 0x00000000, 0x00000000, 0x00000000, 0x00000000,
225 0x00000000, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 /* Packet types for packets with an Innermost/Last MAC VLAN header */
231 static const u32 ice_ptypes_macvlan_il[] = {
232 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
233 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
234 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 0x00000000, 0x00000000, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
243 * include IPV4 other PTYPEs
245 static const u32 ice_ptypes_ipv4_ofos[] = {
246 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
247 0x00000000, 0x00000155, 0x00000000, 0x00000000,
248 0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
249 0x00001500, 0x00000000, 0x00000000, 0x00000000,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
259 static const u32 ice_ptypes_ipv4_ofos_all[] = {
260 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
261 0x00000000, 0x00000155, 0x00000000, 0x00000000,
262 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
263 0x03FFD500, 0x00000000, 0x00000000, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 /* Packet types for packets with an Innermost/Last IPv4 header */
271 static const u32 ice_ptypes_ipv4_il[] = {
272 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
273 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
274 0x00000000, 0x00000000, 0x001FF800, 0x00100000,
275 0xFC0FC000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
283 * include IVP6 other PTYPEs
285 static const u32 ice_ptypes_ipv6_ofos[] = {
286 0x00000000, 0x00000000, 0x77000000, 0x10002000,
287 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
288 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
289 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
299 static const u32 ice_ptypes_ipv6_ofos_all[] = {
300 0x00000000, 0x00000000, 0x77000000, 0x10002000,
301 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
302 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
303 0xFC002A00, 0x0000003F, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 /* Packet types for packets with an Innermost/Last IPv6 header */
311 static const u32 ice_ptypes_ipv6_il[] = {
312 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
313 0x00000770, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
315 0x03F00000, 0x0000003F, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
323 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
324 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
325 0x00000000, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
327 0x00001500, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
335 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
336 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
337 0x00000008, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00139800, 0x00000000,
339 0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
347 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
348 0x00000000, 0x00000000, 0x43000000, 0x10002000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x02300000, 0x00000540, 0x00000000,
351 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
359 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
360 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
361 0x00000430, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
363 0x02300000, 0x00000023, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 0x00000000, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 /* Packet types for packets with an Outermost/First ARP header */
371 static const u32 ice_ptypes_arp_of[] = {
372 0x00000800, 0x00000000, 0x00000000, 0x00000000,
373 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 0x00000000, 0x00000000, 0x00000000, 0x00000000,
375 0x00000000, 0x00000000, 0x00000000, 0x00000000,
376 0x00000000, 0x00000000, 0x00000000, 0x00000000,
377 0x00000000, 0x00000000, 0x00000000, 0x00000000,
378 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 /* UDP Packet types for non-tunneled packets or tunneled
383 * packets with inner UDP.
385 static const u32 ice_ptypes_udp_il[] = {
386 0x81000000, 0x20204040, 0x04000010, 0x80810102,
387 0x00000040, 0x00000000, 0x00000000, 0x00000000,
388 0x00000000, 0x00410000, 0x908427E0, 0x00100007,
389 0x10410000, 0x00000004, 0x00000000, 0x00000000,
390 0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 0x00000000, 0x00000000, 0x00000000, 0x00000000,
392 0x00000000, 0x00000000, 0x00000000, 0x00000000,
393 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 /* Packet types for packets with an Innermost/Last TCP header */
397 static const u32 ice_ptypes_tcp_il[] = {
398 0x04000000, 0x80810102, 0x10000040, 0x02040408,
399 0x00000102, 0x00000000, 0x00000000, 0x00000000,
400 0x00000000, 0x00820000, 0x21084000, 0x00000000,
401 0x20820000, 0x00000008, 0x00000000, 0x00000000,
402 0x00000000, 0x00000000, 0x00000000, 0x00000000,
403 0x00000000, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x00000000, 0x00000000, 0x00000000,
405 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 /* Packet types for packets with an Innermost/Last SCTP header */
409 static const u32 ice_ptypes_sctp_il[] = {
410 0x08000000, 0x01020204, 0x20000081, 0x04080810,
411 0x00000204, 0x00000000, 0x00000000, 0x00000000,
412 0x00000000, 0x01040000, 0x00000000, 0x00000000,
413 0x41040000, 0x00000010, 0x00000000, 0x00000000,
414 0x00000000, 0x00000000, 0x00000000, 0x00000000,
415 0x00000000, 0x00000000, 0x00000000, 0x00000000,
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 /* Packet types for packets with an Outermost/First ICMP header */
421 static const u32 ice_ptypes_icmp_of[] = {
422 0x10000000, 0x00000000, 0x00000000, 0x00000000,
423 0x00000000, 0x00000000, 0x00000000, 0x00000000,
424 0x00000000, 0x00000000, 0x00000000, 0x00000000,
425 0x00000000, 0x00000000, 0x00000000, 0x00000000,
426 0x00000000, 0x00000000, 0x00000000, 0x00000000,
427 0x00000000, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 /* Packet types for packets with an Innermost/Last ICMP header */
433 static const u32 ice_ptypes_icmp_il[] = {
434 0x00000000, 0x02040408, 0x40000102, 0x08101020,
435 0x00000408, 0x00000000, 0x00000000, 0x00000000,
436 0x00000000, 0x00000000, 0x42108000, 0x00000000,
437 0x82080000, 0x00000020, 0x00000000, 0x00000000,
438 0x00000000, 0x00000000, 0x00000000, 0x00000000,
439 0x00000000, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 /* Packet types for packets with an Outermost/First GRE header */
445 static const u32 ice_ptypes_gre_of[] = {
446 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
447 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
448 0x00000000, 0x00000000, 0x00000000, 0x00000000,
449 0x00000000, 0x00000000, 0x00000000, 0x00000000,
450 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 0x00000000, 0x00000000, 0x00000000, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 /* Packet types for packets with an Innermost/Last MAC header */
457 static const u32 ice_ptypes_mac_il[] = {
458 0x00000000, 0x20000000, 0x00000000, 0x00000000,
459 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 0x00000000, 0x00000000, 0x00000000, 0x00000000,
461 0x00000000, 0x00000000, 0x00000000, 0x00000000,
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 /* Packet types for GTPC */
469 static const u32 ice_ptypes_gtpc[] = {
470 0x00000000, 0x00000000, 0x00000000, 0x00000000,
471 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
473 0x00000000, 0x00000000, 0x00000000, 0x00000000,
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x00000000, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 /* Packet types for VXLAN with VNI */
481 static const u32 ice_ptypes_vxlan_vni[] = {
482 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
483 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
484 0x00000000, 0x00000000, 0x00000000, 0x00000000,
485 0x00000000, 0x00000000, 0x00000000, 0x00000000,
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 /* Packet types for GTPC with TEID */
493 static const u32 ice_ptypes_gtpc_tid[] = {
494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 0x00000000, 0x00000000, 0x00000060, 0x00000000,
497 0x00000000, 0x00000000, 0x00000000, 0x00000000,
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000000, 0x00000000,
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 /* Packet types for GTPU */
505 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
506 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
507 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
508 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
509 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
510 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
511 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
512 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
513 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
514 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
515 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
516 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
517 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
518 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
519 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
520 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
521 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
522 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
523 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
524 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
525 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
528 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
529 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
530 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
531 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
532 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
533 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
534 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
535 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
536 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
537 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
538 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
539 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
540 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
541 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
542 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
543 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
544 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
545 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
546 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
547 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
548 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
551 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
552 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
553 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
554 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
555 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
556 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
557 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
558 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
559 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
560 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
561 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
562 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
563 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
564 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
565 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
566 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
567 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
568 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
569 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
570 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
571 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
574 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
575 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
576 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
577 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
578 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
579 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
580 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
581 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
582 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
583 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
584 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
585 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
586 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
587 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
588 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
589 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
590 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
591 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
592 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
593 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
594 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
597 static const u32 ice_ptypes_gtpu[] = {
598 0x00000000, 0x00000000, 0x00000000, 0x00000000,
599 0x00000000, 0x00000000, 0x00000000, 0x00000000,
600 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
601 0x00000000, 0x00000000, 0x00000000, 0x00000000,
602 0x00000000, 0x00000000, 0x00000000, 0x00000000,
603 0x00000000, 0x00000000, 0x00000000, 0x00000000,
604 0x00000000, 0x00000000, 0x00000000, 0x00000000,
605 0x00000000, 0x00000000, 0x00000000, 0x00000000,
608 /* Packet types for pppoe */
609 static const u32 ice_ptypes_pppoe[] = {
610 0x00000000, 0x00000000, 0x00000000, 0x00000000,
611 0x00000000, 0x00000000, 0x00000000, 0x00000000,
612 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
613 0x00000000, 0x00000000, 0x00000000, 0x00000000,
614 0x00000000, 0x00000000, 0x00000000, 0x00000000,
615 0x00000000, 0x00000000, 0x00000000, 0x00000000,
616 0x00000000, 0x00000000, 0x00000000, 0x00000000,
617 0x00000000, 0x00000000, 0x00000000, 0x00000000,
620 /* Packet types for packets with PFCP NODE header */
621 static const u32 ice_ptypes_pfcp_node[] = {
622 0x00000000, 0x00000000, 0x00000000, 0x00000000,
623 0x00000000, 0x00000000, 0x00000000, 0x00000000,
624 0x00000000, 0x00000000, 0x80000000, 0x00000002,
625 0x00000000, 0x00000000, 0x00000000, 0x00000000,
626 0x00000000, 0x00000000, 0x00000000, 0x00000000,
627 0x00000000, 0x00000000, 0x00000000, 0x00000000,
628 0x00000000, 0x00000000, 0x00000000, 0x00000000,
629 0x00000000, 0x00000000, 0x00000000, 0x00000000,
632 /* Packet types for packets with PFCP SESSION header */
633 static const u32 ice_ptypes_pfcp_session[] = {
634 0x00000000, 0x00000000, 0x00000000, 0x00000000,
635 0x00000000, 0x00000000, 0x00000000, 0x00000000,
636 0x00000000, 0x00000000, 0x00000000, 0x00000005,
637 0x00000000, 0x00000000, 0x00000000, 0x00000000,
638 0x00000000, 0x00000000, 0x00000000, 0x00000000,
639 0x00000000, 0x00000000, 0x00000000, 0x00000000,
640 0x00000000, 0x00000000, 0x00000000, 0x00000000,
641 0x00000000, 0x00000000, 0x00000000, 0x00000000,
644 /* Packet types for l2tpv3 */
645 static const u32 ice_ptypes_l2tpv3[] = {
646 0x00000000, 0x00000000, 0x00000000, 0x00000000,
647 0x00000000, 0x00000000, 0x00000000, 0x00000000,
648 0x00000000, 0x00000000, 0x00000000, 0x00000300,
649 0x00000000, 0x00000000, 0x00000000, 0x00000000,
650 0x00000000, 0x00000000, 0x00000000, 0x00000000,
651 0x00000000, 0x00000000, 0x00000000, 0x00000000,
652 0x00000000, 0x00000000, 0x00000000, 0x00000000,
653 0x00000000, 0x00000000, 0x00000000, 0x00000000,
656 /* Packet types for esp */
657 static const u32 ice_ptypes_esp[] = {
658 0x00000000, 0x00000000, 0x00000000, 0x00000000,
659 0x00000000, 0x00000003, 0x00000000, 0x00000000,
660 0x00000000, 0x00000000, 0x00000000, 0x00000000,
661 0x00000000, 0x00000000, 0x00000000, 0x00000000,
662 0x00000000, 0x00000000, 0x00000000, 0x00000000,
663 0x00000000, 0x00000000, 0x00000000, 0x00000000,
664 0x00000000, 0x00000000, 0x00000000, 0x00000000,
665 0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 /* Packet types for ah */
669 static const u32 ice_ptypes_ah[] = {
670 0x00000000, 0x00000000, 0x00000000, 0x00000000,
671 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
672 0x00000000, 0x00000000, 0x00000000, 0x00000000,
673 0x00000000, 0x00000000, 0x00000000, 0x00000000,
674 0x00000000, 0x00000000, 0x00000000, 0x00000000,
675 0x00000000, 0x00000000, 0x00000000, 0x00000000,
676 0x00000000, 0x00000000, 0x00000000, 0x00000000,
677 0x00000000, 0x00000000, 0x00000000, 0x00000000,
680 /* Packet types for packets with NAT_T ESP header */
681 static const u32 ice_ptypes_nat_t_esp[] = {
682 0x00000000, 0x00000000, 0x00000000, 0x00000000,
683 0x00000000, 0x00000030, 0x00000000, 0x00000000,
684 0x00000000, 0x00000000, 0x00000000, 0x00000000,
685 0x00000000, 0x00000000, 0x00000000, 0x00000000,
686 0x00000000, 0x00000000, 0x00000000, 0x00000000,
687 0x00000000, 0x00000000, 0x00000000, 0x00000000,
688 0x00000000, 0x00000000, 0x00000000, 0x00000000,
689 0x00000000, 0x00000000, 0x00000000, 0x00000000,
692 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
693 0x00000846, 0x00000000, 0x00000000, 0x00000000,
694 0x00000000, 0x00000000, 0x00000000, 0x00000000,
695 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
696 0x00000000, 0x00000000, 0x00000000, 0x00000000,
697 0x00000000, 0x00000000, 0x00000000, 0x00000000,
698 0x00000000, 0x00000000, 0x00000000, 0x00000000,
699 0x00000000, 0x00000000, 0x00000000, 0x00000000,
700 0x00000000, 0x00000000, 0x00000000, 0x00000000,
703 static const u32 ice_ptypes_gtpu_no_ip[] = {
704 0x00000000, 0x00000000, 0x00000000, 0x00000000,
705 0x00000000, 0x00000000, 0x00000000, 0x00000000,
706 0x00000000, 0x00000000, 0x00000600, 0x00000000,
707 0x00000000, 0x00000000, 0x00000000, 0x00000000,
708 0x00000000, 0x00000000, 0x00000000, 0x00000000,
709 0x00000000, 0x00000000, 0x00000000, 0x00000000,
710 0x00000000, 0x00000000, 0x00000000, 0x00000000,
711 0x00000000, 0x00000000, 0x00000000, 0x00000000,
714 static const u32 ice_ptypes_ecpri_tp0[] = {
715 0x00000000, 0x00000000, 0x00000000, 0x00000000,
716 0x00000000, 0x00000000, 0x00000000, 0x00000000,
717 0x00000000, 0x00000000, 0x00000000, 0x00000400,
718 0x00000000, 0x00000000, 0x00000000, 0x00000000,
719 0x00000000, 0x00000000, 0x00000000, 0x00000000,
720 0x00000000, 0x00000000, 0x00000000, 0x00000000,
721 0x00000000, 0x00000000, 0x00000000, 0x00000000,
722 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for eCPRI transport over UDP (message type 0); used by
 * ice_flow_proc_seg_hdrs() for ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 segments.
 */
725 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
726 0x00000000, 0x00000000, 0x00000000, 0x00000000,
727 0x00000000, 0x00000000, 0x00000000, 0x00000000,
728 0x00000000, 0x00000000, 0x00000000, 0x00100000,
729 0x00000000, 0x00000000, 0x00000000, 0x00000000,
730 0x00000000, 0x00000000, 0x00000000, 0x00000000,
731 0x00000000, 0x00000000, 0x00000000, 0x00000000,
732 0x00000000, 0x00000000, 0x00000000, 0x00000000,
733 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for L2TPv2-encapsulated packets; used by
 * ice_flow_proc_seg_hdrs() for ICE_FLOW_SEG_HDR_L2TPV2 segments.
 */
736 static const u32 ice_ptypes_l2tpv2[] = {
737 0x00000000, 0x00000000, 0x00000000, 0x00000000,
738 0x00000000, 0x00000000, 0x00000000, 0x00000000,
739 0x00000000, 0x00000000, 0x00000000, 0x00000000,
740 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
741 0x00000000, 0x00000000, 0x00000000, 0x00000000,
742 0x00000000, 0x00000000, 0x00000000, 0x00000000,
743 0x00000000, 0x00000000, 0x00000000, 0x00000000,
744 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for PPP-encapsulated packets; a subset of the L2TPv2 PTYPEs
 * above (note the narrower first word). Used by ice_flow_proc_seg_hdrs()
 * for ICE_FLOW_SEG_HDR_PPP segments.
 */
747 static const u32 ice_ptypes_ppp[] = {
748 0x00000000, 0x00000000, 0x00000000, 0x00000000,
749 0x00000000, 0x00000000, 0x00000000, 0x00000000,
750 0x00000000, 0x00000000, 0x00000000, 0x00000000,
751 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
752 0x00000000, 0x00000000, 0x00000000, 0x00000000,
753 0x00000000, 0x00000000, 0x00000000, 0x00000000,
754 0x00000000, 0x00000000, 0x00000000, 0x00000000,
755 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for fragmented IPv4 packets; used by ice_flow_proc_seg_hdrs()
 * when a segment requests IPV4 together with ICE_FLOW_SEG_HDR_IPV_FRAG.
 */
758 static const u32 ice_ptypes_ipv4_frag[] = {
759 0x00400000, 0x00000000, 0x00000000, 0x00000000,
760 0x00000000, 0x00000000, 0x00000000, 0x00000000,
761 0x00000000, 0x00000000, 0x00000000, 0x00000000,
762 0x00000000, 0x00000000, 0x00000000, 0x00000000,
763 0x00000000, 0x00000000, 0x00000000, 0x00000000,
764 0x00000000, 0x00000000, 0x00000000, 0x00000000,
765 0x00000000, 0x00000000, 0x00000000, 0x00000000,
766 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for fragmented IPv6 packets; used by ice_flow_proc_seg_hdrs()
 * when a segment requests IPV6 together with ICE_FLOW_SEG_HDR_IPV_FRAG.
 */
769 static const u32 ice_ptypes_ipv6_frag[] = {
770 0x00000000, 0x00000000, 0x01000000, 0x00000000,
771 0x00000000, 0x00000000, 0x00000000, 0x00000000,
772 0x00000000, 0x00000000, 0x00000000, 0x00000000,
773 0x00000000, 0x00000000, 0x00000000, 0x00000000,
774 0x00000000, 0x00000000, 0x00000000, 0x00000000,
775 0x00000000, 0x00000000, 0x00000000, 0x00000000,
776 0x00000000, 0x00000000, 0x00000000, 0x00000000,
777 0x00000000, 0x00000000, 0x00000000, 0x00000000,
780 /* Manage parameters and info. used during the creation of a flow profile */
/* NOTE(review): the inline numbering jumps (782, 784, 786, 789, 793-794, 797+)
 * indicate member lines are missing from this listing. Code below references
 * params->blk, params->es_cnt and params->attr_cnt, which do not appear in the
 * visible struct body — restore them (and the closing brace) from upstream.
 */
781 struct ice_flow_prof_params {
/* Number of bytes a formatted ACL entry will require (set by
 * ice_flow_acl_def_entry_frmt(), consumed by ice_flow_sel_acl_scen()).
 */
783 u16 entry_length; /* # of bytes formatted entry will require */
785 struct ice_flow_prof *prof;
787 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
788 * This will give us the direction flags.
/* Extraction sequence: one field-vector word per extracted protocol word. */
790 struct ice_fv_word es[ICE_MAX_FV_WORDS];
791 /* attributes can be used to add attributes to a particular PTYPE */
792 const struct ice_ptype_attributes *attr;
/* Per-word match masks, parallel to es[]. */
795 u16 mask[ICE_MAX_FV_WORDS];
/* Bitmap of packet types (PTYPEs) this profile applies to. */
796 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Tunnel/inner header flags: a segment carrying any of these is treated as
 * an inner (encapsulated) segment for RSS purposes.
 */
799 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
800 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
801 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
802 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
803 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
804 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
805 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
806 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP)
/* Layer-grouped masks; ice_flow_val_hdrs() requires at most one L3 and one
 * L4 header bit set per segment.
 */
808 #define ICE_FLOW_SEG_HDRS_L2_MASK \
809 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
810 #define ICE_FLOW_SEG_HDRS_L3_MASK \
811 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
812 ICE_FLOW_SEG_HDR_ARP)
813 #define ICE_FLOW_SEG_HDRS_L4_MASK \
814 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
815 ICE_FLOW_SEG_HDR_SCTP)
816 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
817 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
818 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
821 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
822 * @segs: array of one or more packet segments that describe the flow
823 * @segs_cnt: number of packet segments provided
/* Returns ICE_ERR_PARAM if any segment selects more than one L3 or more than
 * one L4 header (checked via the power-of-two test on the masked bits).
 * NOTE(review): numbering gaps (827-829, 840-844) show the loop-variable
 * declaration and the success-return tail are missing from this listing.
 */
825 static enum ice_status
826 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
830 for (i = 0; i < segs_cnt; i++) {
831 /* Multiple L3 headers */
832 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
833 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
834 return ICE_ERR_PARAM;
836 /* Multiple L4 headers */
837 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
838 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
839 return ICE_ERR_PARAM;
845 /* Sizes of fixed known protocol headers without header options */
/* All sizes are in bytes; summed by ice_flow_calc_seg_sz() to bound raw-field
 * offsets in ice_flow_xtract_raws().
 */
846 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
847 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
848 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
849 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
850 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
851 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
852 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
853 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
854 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
857 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
858 * @params: information about the flow to be processed
859 * @seg: index of packet segment whose header size is to be determined
/* Sums the fixed header sizes (bytes) for the L2/L3/L4 headers selected in
 * segment @seg. NOTE(review): numbering gaps (862-865, 878-880, 889-891)
 * show the local declaration of sz, the L4-without-L3 fallback branch body,
 * and the final return are missing from this listing — confirm upstream.
 */
861 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
866 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
867 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3 headers are mutually exclusive (enforced by ice_flow_val_hdrs()). */
870 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
871 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
872 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
873 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
874 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
875 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
876 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
877 /* A L3 header is required if L4 is specified */
/* L4 headers are likewise mutually exclusive. */
881 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
882 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
883 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
884 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
885 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
886 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
887 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
888 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
894 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
895 * @params: information about the flow to be processed
897 * This function identifies the packet types associated with the protocol
898 * headers being present in packet segments of the specified flow profile.
/* Starts from an all-ones PTYPE bitmap and repeatedly ANDs in (or ANDs out,
 * for PPPoE/PFCP exclusions) the per-protocol PTYPE tables for every segment.
 * Throughout, "!i" selects the outer-of-segment (OFOS) table and "i" the
 * inner-layer (IL) table. NOTE(review): numbering gaps in this listing hide
 * several lines (e.g. closing braces, the "src = !i ? ..." selectors at
 * 943/950, the PFCP else at 1112-1113, and the function tail) — verify
 * against upstream before modifying.
 */
900 static enum ice_status
901 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
903 struct ice_flow_prof *prof;
/* Assume every PTYPE matches until a header constraint filters it out. */
906 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
911 for (i = 0; i < params->prof->segs_cnt; i++) {
912 const ice_bitmap_t *src;
915 hdrs = prof->segs[i].hdrs;
917 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
918 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
919 (const ice_bitmap_t *)ice_ptypes_mac_il;
920 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* VLAN PTYPEs only exist for inner segments. */
924 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
925 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
926 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* ARP is only valid as an outer (first) segment header. */
930 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
931 ice_and_bitmap(params->ptypes, params->ptypes,
932 (const ice_bitmap_t *)ice_ptypes_arp_of,
936 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
937 src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
938 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* L3 dispatch: IPV_OTHER ("all"), IPV_FRAG, no-L4, then plain tables. */
941 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
942 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
944 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
945 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
946 ice_and_bitmap(params->ptypes, params->ptypes, src,
948 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
949 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
951 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
952 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
953 ice_and_bitmap(params->ptypes, params->ptypes, src,
955 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
956 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
957 src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
958 ice_and_bitmap(params->ptypes, params->ptypes, src,
960 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
961 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
962 src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
963 ice_and_bitmap(params->ptypes, params->ptypes, src,
965 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
966 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
967 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
968 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
969 ice_and_bitmap(params->ptypes, params->ptypes, src,
971 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
972 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
973 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
974 ice_and_bitmap(params->ptypes, params->ptypes, src,
976 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
977 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
978 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
979 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
980 ice_and_bitmap(params->ptypes, params->ptypes, src,
982 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
983 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
984 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
985 ice_and_bitmap(params->ptypes, params->ptypes, src,
989 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
990 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
991 ice_and_bitmap(params->ptypes, params->ptypes,
992 src, ICE_FLOW_PTYPE_MAX);
993 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
994 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
995 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* Neither ETH_NON_IP nor PPPOE requested: exclude PPPoE PTYPEs. */
998 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
999 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1000 ICE_FLOW_PTYPE_MAX);
/* L4 dispatch (mutually exclusive, see ice_flow_val_hdrs()). */
1003 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1004 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1005 ice_and_bitmap(params->ptypes, params->ptypes, src,
1006 ICE_FLOW_PTYPE_MAX);
1007 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1008 ice_and_bitmap(params->ptypes, params->ptypes,
1009 (const ice_bitmap_t *)ice_ptypes_tcp_il,
1010 ICE_FLOW_PTYPE_MAX);
1011 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1012 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1013 ice_and_bitmap(params->ptypes, params->ptypes, src,
1014 ICE_FLOW_PTYPE_MAX);
/* Tunnel/upper-protocol dispatch; GTP-U variants also record the PTYPE
 * attribute table to apply (down/up/extension-header/session).
 */
1017 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1018 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1019 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1020 ice_and_bitmap(params->ptypes, params->ptypes, src,
1021 ICE_FLOW_PTYPE_MAX);
1022 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1024 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1025 ice_and_bitmap(params->ptypes, params->ptypes,
1026 src, ICE_FLOW_PTYPE_MAX);
1028 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1029 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1030 ice_and_bitmap(params->ptypes, params->ptypes,
1031 src, ICE_FLOW_PTYPE_MAX);
1032 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1033 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1034 ice_and_bitmap(params->ptypes, params->ptypes,
1035 src, ICE_FLOW_PTYPE_MAX);
1036 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1037 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1038 ice_and_bitmap(params->ptypes, params->ptypes,
1039 src, ICE_FLOW_PTYPE_MAX);
1040 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1041 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1042 ice_and_bitmap(params->ptypes, params->ptypes,
1043 src, ICE_FLOW_PTYPE_MAX);
1045 /* Attributes for GTP packet with downlink */
1046 params->attr = ice_attr_gtpu_down;
1047 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1048 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1049 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1050 ice_and_bitmap(params->ptypes, params->ptypes,
1051 src, ICE_FLOW_PTYPE_MAX);
1053 /* Attributes for GTP packet with uplink */
1054 params->attr = ice_attr_gtpu_up;
1055 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1056 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1057 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1058 ice_and_bitmap(params->ptypes, params->ptypes,
1059 src, ICE_FLOW_PTYPE_MAX);
1061 /* Attributes for GTP packet with Extension Header */
1062 params->attr = ice_attr_gtpu_eh;
1063 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1064 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1065 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1066 ice_and_bitmap(params->ptypes, params->ptypes,
1067 src, ICE_FLOW_PTYPE_MAX);
1069 /* Attributes for GTP packet without Extension Header */
1070 params->attr = ice_attr_gtpu_session;
1071 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1072 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1073 src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1074 ice_and_bitmap(params->ptypes, params->ptypes,
1075 src, ICE_FLOW_PTYPE_MAX);
1076 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1077 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1078 ice_and_bitmap(params->ptypes, params->ptypes,
1079 src, ICE_FLOW_PTYPE_MAX);
1080 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1081 src = (const ice_bitmap_t *)ice_ptypes_esp;
1082 ice_and_bitmap(params->ptypes, params->ptypes,
1083 src, ICE_FLOW_PTYPE_MAX);
1084 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1085 src = (const ice_bitmap_t *)ice_ptypes_ah;
1086 ice_and_bitmap(params->ptypes, params->ptypes,
1087 src, ICE_FLOW_PTYPE_MAX);
1088 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1089 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1090 ice_and_bitmap(params->ptypes, params->ptypes,
1091 src, ICE_FLOW_PTYPE_MAX);
1092 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1093 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1094 ice_and_bitmap(params->ptypes, params->ptypes,
1095 src, ICE_FLOW_PTYPE_MAX);
1096 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1097 src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1098 ice_and_bitmap(params->ptypes, params->ptypes,
1099 src, ICE_FLOW_PTYPE_MAX);
1102 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1103 src = (const ice_bitmap_t *)ice_ptypes_ppp;
1104 ice_and_bitmap(params->ptypes, params->ptypes,
1105 src, ICE_FLOW_PTYPE_MAX);
1108 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
/* Node vs. session PFCP messages use distinct PTYPE tables. */
1109 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1111 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1114 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1116 ice_and_bitmap(params->ptypes, params->ptypes,
1117 src, ICE_FLOW_PTYPE_MAX);
/* PFCP not requested: exclude both PFCP PTYPE groups. */
1119 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1120 ice_andnot_bitmap(params->ptypes, params->ptypes,
1121 src, ICE_FLOW_PTYPE_MAX);
1123 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1124 ice_andnot_bitmap(params->ptypes, params->ptypes,
1125 src, ICE_FLOW_PTYPE_MAX);
1133 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1134 * @hw: pointer to the HW struct
1135 * @params: information about the flow to be processed
1136 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1138 * This function will allocate an extraction sequence entries for a DWORD size
1139 * chunk of the packet flags.
/* Returns ICE_ERR_MAX_LIMIT if the field vector is already full.
 * NOTE(review): numbering gaps (1145, 1147-1148, 1158, 1160, 1163+) indicate
 * the idx declaration, the "else" keyword and the es_cnt increment / success
 * return are missing from this listing — confirm upstream.
 */
1141 static enum ice_status
1142 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1143 struct ice_flow_prof_params *params,
1144 enum ice_flex_mdid_pkt_flags flags)
1146 u8 fv_words = hw->blk[params->blk].es.fvw;
1149 /* Make sure the number of extraction sequence entries required does not
1150 * exceed the block's capacity.
1152 if (params->es_cnt >= fv_words)
1153 return ICE_ERR_MAX_LIMIT;
1155 /* some blocks require a reversed field vector layout */
1156 if (hw->blk[params->blk].es.reverse)
1157 idx = fv_words - params->es_cnt - 1;
1159 idx = params->es_cnt;
/* Packet flags are exposed through the metadata protocol ID; @flags is the
 * offset selecting which flag DWORD to extract.
 */
1161 params->es[idx].prot_id = ICE_PROT_META_ID;
1162 params->es[idx].off = flags;
1169 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1170 * @hw: pointer to the HW struct
1171 * @params: information about the flow to be processed
1172 * @seg: packet segment index of the field to be extracted
1173 * @fld: ID of field to be extracted
1174 * @match: bitfield of all fields
1176 * This function determines the protocol ID, offset, and size of the given
1177 * field. It then allocates one or more extraction sequence entries for the
1178 * given field, and fill the entries with protocol ID and offset information.
/* NOTE(review): numbering gaps throughout this listing show that every
 * "break;" in the switch, several local declarations (sib_mask, off, mask,
 * idx), and the trailing es_cnt increment / success return are missing —
 * this body is NOT compilable as shown; restore from upstream before edit.
 */
1180 static enum ice_status
1181 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1182 u8 seg, enum ice_flow_field fld, u64 match)
1184 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1185 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1186 u8 fv_words = hw->blk[params->blk].es.fvw;
1187 struct ice_flow_fld_info *flds;
1188 u16 cnt, ese_bits, i;
1193 flds = params->prof->segs[seg].fields;
/* Map the abstract flow field to a hardware protocol ID; seg 0 selects the
 * outer/first variant, any later segment the inner-layer variant.
 */
1196 case ICE_FLOW_FIELD_IDX_ETH_DA:
1197 case ICE_FLOW_FIELD_IDX_ETH_SA:
1198 case ICE_FLOW_FIELD_IDX_S_VLAN:
1199 case ICE_FLOW_FIELD_IDX_C_VLAN:
1200 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1202 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1203 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1205 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1206 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1208 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1209 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1211 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1212 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1213 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1215 /* TTL and PROT share the same extraction seq. entry.
1216 * Each is considered a sibling to the other in terms of sharing
1217 * the same extraction sequence entry.
1219 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1220 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1222 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1224 /* If the sibling field is also included, that field's
1225 * mask needs to be included.
1227 if (match & BIT(sib))
1228 sib_mask = ice_flds_info[sib].mask;
1230 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1231 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1232 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1234 /* TTL and PROT share the same extraction seq. entry.
1235 * Each is considered a sibling to the other in terms of sharing
1236 * the same extraction sequence entry.
1238 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1239 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1241 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1243 /* If the sibling field is also included, that field's
1244 * mask needs to be included.
1246 if (match & BIT(sib))
1247 sib_mask = ice_flds_info[sib].mask;
1249 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1250 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1251 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1253 case ICE_FLOW_FIELD_IDX_IPV4_ID:
1254 prot_id = ICE_PROT_IPV4_OF_OR_S;
1256 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1257 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1258 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1259 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1260 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1261 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1262 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1263 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1264 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1266 case ICE_FLOW_FIELD_IDX_IPV6_ID:
1267 prot_id = ICE_PROT_IPV6_FRAG;
1269 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1270 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1271 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1272 prot_id = ICE_PROT_TCP_IL;
1274 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1275 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1276 prot_id = ICE_PROT_UDP_IL_OR_S;
1278 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1279 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1280 prot_id = ICE_PROT_SCTP_IL;
1282 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1283 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1284 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1285 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1286 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1287 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1288 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1289 /* GTP is accessed through UDP OF protocol */
1290 prot_id = ICE_PROT_UDP_OF;
1292 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1293 prot_id = ICE_PROT_PPPOE;
1295 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1296 prot_id = ICE_PROT_UDP_IL_OR_S;
1298 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1299 prot_id = ICE_PROT_L2TPV3;
1301 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1302 prot_id = ICE_PROT_ESP_F;
1304 case ICE_FLOW_FIELD_IDX_AH_SPI:
1305 prot_id = ICE_PROT_ESP_2;
1307 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1308 prot_id = ICE_PROT_UDP_IL_OR_S;
1310 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1311 prot_id = ICE_PROT_ECPRI;
1313 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1314 prot_id = ICE_PROT_UDP_IL_OR_S;
1316 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1317 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1318 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1319 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1320 case ICE_FLOW_FIELD_IDX_ARP_OP:
1321 prot_id = ICE_PROT_ARP_OF;
1323 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1324 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1325 /* ICMP type and code share the same extraction seq. entry */
1326 prot_id = (params->prof->segs[seg].hdrs &
1327 ICE_FLOW_SEG_HDR_IPV4) ?
1328 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1329 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1330 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1331 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1333 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1334 prot_id = ICE_PROT_GRE_OF;
/* Unknown field ID: nothing to extract. */
1337 return ICE_ERR_NOT_IMPL;
1340 /* Each extraction sequence entry is a word in size, and extracts a
1341 * word-aligned offset from a protocol header.
1343 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record protocol, word-aligned byte offset, bit displacement within the
 * word, starting es index and mask for this field.
 */
1345 flds[fld].xtrct.prot_id = prot_id;
1346 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1347 ICE_FLOW_FV_EXTRACT_SZ;
1348 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1349 flds[fld].xtrct.idx = params->es_cnt;
1350 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1352 /* Adjust the next field-entry index after accommodating the number of
1353 * entries this field consumes
1355 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1356 ice_flds_info[fld].size, ese_bits);
1358 /* Fill in the extraction sequence entries needed for this field */
1359 off = flds[fld].xtrct.off;
1360 mask = flds[fld].xtrct.mask;
1361 for (i = 0; i < cnt; i++) {
1362 /* Only consume an extraction sequence entry if there is no
1363 * sibling field associated with this field or the sibling entry
1364 * already extracts the word shared with this field.
1366 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1367 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1368 flds[sib].xtrct.off != off) {
1371 /* Make sure the number of extraction sequence required
1372 * does not exceed the block's capability
1374 if (params->es_cnt >= fv_words)
1375 return ICE_ERR_MAX_LIMIT;
1377 /* some blocks require a reversed field vector layout */
1378 if (hw->blk[params->blk].es.reverse)
1379 idx = fv_words - params->es_cnt - 1;
1381 idx = params->es_cnt;
1383 params->es[idx].prot_id = prot_id;
1384 params->es[idx].off = off;
/* Combine this field's mask with the sibling's so one es entry
 * serves both fields.
 */
1385 params->mask[idx] = mask | sib_mask;
1389 off += ICE_FLOW_FV_EXTRACT_SZ;
1396 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1397 * @hw: pointer to the HW struct
1398 * @params: information about the flow to be processed
1399 * @seg: index of packet segment whose raw fields are to be extracted
/* Raw fields are matched at arbitrary byte offsets relative to the start of
 * the packet (ICE_PROT_MAC_OF_OR_S); offsets must fall within the segment's
 * fixed header size. Returns ICE_ERR_MAX_LIMIT when the raw count or the
 * field vector capacity is exceeded, ICE_ERR_PARAM for bad offsets.
 */
1401 static enum ice_status
1402 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* No raw fields requested: nothing to do. */
1409 if (!params->prof->segs[seg].raws_cnt)
1412 if (params->prof->segs[seg].raws_cnt >
1413 ARRAY_SIZE(params->prof->segs[seg].raws))
1414 return ICE_ERR_MAX_LIMIT;
1416 /* Offsets within the segment headers are not supported */
1417 hdrs_sz = ice_flow_calc_seg_sz(params, seg)
1419 return ICE_ERR_PARAM;
1421 fv_words = hw->blk[params->blk].es.fvw;
1423 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1424 struct ice_flow_seg_fld_raw *raw;
1427 raw = &params->prof->segs[seg].raws[i];
1429 /* Storing extraction information */
1430 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1431 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1432 ICE_FLOW_FV_EXTRACT_SZ;
1433 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1435 raw->info.xtrct.idx = params->es_cnt;
1437 /* Determine the number of field vector entries this raw field
1440 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1441 (raw->info.src.last * BITS_PER_BYTE),
1442 (ICE_FLOW_FV_EXTRACT_SZ *
1444 off = raw->info.xtrct.off;
1445 for (j = 0; j < cnt; j++) {
1448 /* Make sure the number of extraction sequence required
1449 * does not exceed the block's capability
1451 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1452 params->es_cnt >= ICE_MAX_FV_WORDS)
1453 return ICE_ERR_MAX_LIMIT;
1455 /* some blocks require a reversed field vector layout */
1456 if (hw->blk[params->blk].es.reverse)
1457 idx = fv_words - params->es_cnt - 1;
1459 idx = params->es_cnt;
1461 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1462 params->es[idx].off = off;
1464 off += ICE_FLOW_FV_EXTRACT_SZ;
1472 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1473 * @hw: pointer to the HW struct
1474 * @params: information about the flow to be processed
1476 * This function iterates through all matched fields in the given segments, and
1477 * creates an extraction sequence for the fields.
/* Returns the first non-success status from ice_flow_xtract_pkt_flags(),
 * ice_flow_xtract_fld() or ice_flow_xtract_raws().
 */
1479 static enum ice_status
1480 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1481 struct ice_flow_prof_params *params)
1483 enum ice_status status = ICE_SUCCESS;
1486 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
/* ACL profiles additionally extract the packet-flags metadata word first. */
1489 if (params->blk == ICE_BLK_ACL) {
1490 status = ice_flow_xtract_pkt_flags(hw, params,
1491 ICE_RX_MDID_PKT_FLAGS_15_0);
1496 for (i = 0; i < params->prof->segs_cnt; i++) {
1497 u64 match = params->prof->segs[i].match;
1498 enum ice_flow_field j;
/* Walk each requested field bit; clear it once extracted. */
1500 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1501 ICE_FLOW_FIELD_IDX_MAX) {
1502 status = ice_flow_xtract_fld(hw, params, i, j, match);
1505 ice_clear_bit(j, (ice_bitmap_t *)&match);
1508 /* Process raw matching bytes */
1509 status = ice_flow_xtract_raws(hw, params, i);
1518 * ice_flow_sel_acl_scen - returns the specific scenario
1519 * @hw: pointer to the hardware structure
1520 * @params: information about the flow to be processed
1522 * This function will return the specific scenario based on the
1523 * params passed to it
/* Picks the narrowest ACL scenario whose effective width still fits the
 * computed entry length (best-fit). Returns ICE_ERR_DOES_NOT_EXIST when no
 * ACL table or no fitting scenario exists.
 */
1525 static enum ice_status
1526 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1528 /* Find the best-fit scenario for the provided match width */
1529 struct ice_acl_scen *cand_scen = NULL, *scen;
1532 return ICE_ERR_DOES_NOT_EXIST;
1534 /* Loop through each scenario and match against the scenario width
1535 * to select the specific scenario
1537 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1538 if (scen->eff_width >= params->entry_length &&
1539 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1542 return ICE_ERR_DOES_NOT_EXIST;
1544 params->prof->cfg.scen = cand_scen;
1550 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1551 * @params: information about the flow to be processed
/* Assigns byte-selection indices (and range-checker slots) to every matched
 * field and raw field of the profile, then records the total entry length.
 * Returns ICE_ERR_PARAM when a range field is too wide / incomplete, when
 * more than ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS bytes are needed, or when more
 * than ICE_AQC_ACL_PROF_RANGES_NUM_CFG range checkers would be used.
 */
1553 static enum ice_status
1554 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1556 u16 index, i, range_idx = 0;
1558 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1560 for (i = 0; i < params->prof->segs_cnt; i++) {
1561 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1564 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1565 ICE_FLOW_FIELD_IDX_MAX) {
1566 struct ice_flow_fld_info *fld = &seg->fields[j];
1568 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1570 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1571 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1573 /* Range checking only supported for single
1576 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1578 BITS_PER_BYTE * 2) > 1)
1579 return ICE_ERR_PARAM;
1581 /* Ranges must define low and high values */
1582 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1583 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1584 return ICE_ERR_PARAM;
/* Range fields consume a range-checker slot, not entry bytes. */
1586 fld->entry.val = range_idx++;
1588 /* Store adjusted byte-length of field for later
1589 * use, taking into account potential
1590 * non-byte-aligned displacement
1592 fld->entry.last = DIVIDE_AND_ROUND_UP
1593 (ice_flds_info[j].size +
1594 (fld->xtrct.disp % BITS_PER_BYTE),
1596 fld->entry.val = index;
1597 index += fld->entry.last;
/* Raw fields are laid out after the named fields. */
1601 for (j = 0; j < seg->raws_cnt; j++) {
1602 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1604 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1605 raw->info.entry.val = index;
1606 raw->info.entry.last = raw->info.src.last;
1607 index += raw->info.entry.last;
1611 /* Currently only support using the byte selection base, which only
1612 * allows for an effective entry size of 30 bytes. Reject anything
1615 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1616 return ICE_ERR_PARAM;
1618 /* Only 8 range checkers per profile, reject anything trying to use
1621 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1622 return ICE_ERR_PARAM;
1624 /* Store # bytes required for entry for later use */
1625 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1631 * ice_flow_proc_segs - process all packet segments associated with a profile
1632 * @hw: pointer to the HW struct
1633 * @params: information about the flow to be processed
/* Pipeline: resolve PTYPEs, build the extraction sequence, then apply any
 * block-specific post-processing (ACL entry layout + scenario selection).
 * Returns ICE_ERR_NOT_IMPL for blocks without a handler.
 */
1635 static enum ice_status
1636 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1638 enum ice_status status;
1640 status = ice_flow_proc_seg_hdrs(params);
1644 status = ice_flow_create_xtrct_seq(hw, params);
1648 switch (params->blk) {
1651 status = ICE_SUCCESS;
1654 status = ice_flow_acl_def_entry_frmt(params);
/* Scenario selection depends on entry_length computed above. */
1657 status = ice_flow_sel_acl_scen(hw, params);
1662 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds(): check matched fields,
 * check VSI association, or ignore flow direction, respectively.
 */
1668 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1669 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1670 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1673 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1674 * @hw: pointer to the HW struct
1675 * @blk: classification stage
1676 * @dir: flow direction
1677 * @segs: array of one or more packet segments that describe the flow
1678 * @segs_cnt: number of packet segments provided
1679 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1680 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
/* Returns the first matching profile or NULL; the per-block profile-list
 * lock is held only for the duration of the search.
 */
1682 static struct ice_flow_prof *
1683 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1684 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1685 u8 segs_cnt, u16 vsi_handle, u32 conds)
1687 struct ice_flow_prof *p, *prof = NULL;
1689 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1690 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1691 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1692 segs_cnt && segs_cnt == p->segs_cnt) {
1695 /* Check for profile-VSI association if specified */
1696 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1697 ice_is_vsi_valid(hw, vsi_handle) &&
1698 !ice_is_bit_set(p->vsis, vsi_handle))
1701 /* Protocol headers must be checked. Matched fields are
1702 * checked if specified.
1704 for (i = 0; i < segs_cnt; i++)
1705 if (segs[i].hdrs != p->segs[i].hdrs ||
1706 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1707 segs[i].match != p->segs[i].match))
1710 /* A match is found if all segments are matched */
1711 if (i == segs_cnt) {
1716 ice_release_lock(&hw->fl_profs_locks[blk]);
1722 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1723 * @hw: pointer to the HW struct
1724 * @blk: classification stage
1725 * @dir: flow direction
1726 * @segs: array of one or more packet segments that describe the flow
1727 * @segs_cnt: number of packet segments provided
/* Convenience wrapper: field-checked lookup, no VSI condition; returns the
 * profile ID or ICE_FLOW_PROF_ID_INVAL when not found.
 */
1730 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1731 struct ice_flow_seg_info *segs, u8 segs_cnt)
1733 struct ice_flow_prof *p;
1735 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1736 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1738 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1742 * ice_flow_find_prof_id - Look up a profile with given profile ID
1743 * @hw: pointer to the HW struct
1744 * @blk: classification stage
1745 * @prof_id: unique ID to identify this flow profile
/* Linear scan of the block's profile list; returns the profile or NULL.
 * NOTE(review): unlike ice_flow_find_prof_conds(), no list lock is visibly
 * taken here — callers presumably hold fl_profs_locks[blk]; confirm upstream.
 */
1747 static struct ice_flow_prof *
1748 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1750 struct ice_flow_prof *p;
1752 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1753 if (p->id == prof_id)
1760 * ice_dealloc_flow_entry - Deallocate flow entry memory
1761 * @hw: pointer to the HW struct
1762 * @entry: flow entry to be removed
1765 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
/* Free the entry's formatted HW key material */
1771 ice_free(hw, entry->entry);
/* Range-checker buffer is optional; clear the pointer after freeing */
1773 if (entry->range_buf) {
1774 ice_free(hw, entry->range_buf);
1775 entry->range_buf = NULL;
/* Release the entry's private copy of its actions */
1779 ice_free(hw, entry->acts);
1781 entry->acts_cnt = 0;
/* Finally free the entry container itself */
1784 ice_free(hw, entry);
1788 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1789 * @hw: pointer to the HW struct
1790 * @blk: classification stage
1791 * @prof_id: the profile ID handle
1792 * @hw_prof_id: pointer to variable to receive the HW profile ID
1795 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
/* Default to "not found"; overwritten only on a successful lookup */
1798 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1799 struct ice_prof_map *map;
/* prof_map_lock serializes access to the SW-ID -> HW-profile map */
1801 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1802 map = ice_search_prof_id(hw, blk, prof_id);
/* NOTE(review): *hw_prof_id is presumably written only when map is
 * non-NULL - confirm the guard in the full file.
 */
1804 *hw_prof_id = map->prof_id;
1805 status = ICE_SUCCESS;
1807 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1811 #define ICE_ACL_INVALID_SCEN 0x3f
1814 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1815 * @hw: pointer to the hardware structure
1816 * @prof: pointer to flow profile
1817 * @buf: destination buffer function writes partial extraction sequence to
1819 * returns ICE_SUCCESS if no PF is associated to the given profile
1820 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1821 * returns other error code for real error
1823 static enum ice_status
1824 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1825 struct ice_aqc_acl_prof_generic_frmt *buf)
1827 enum ice_status status;
/* Translate the SW profile ID to the HW profile ID first */
1830 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* Query the generic-format profile data (per-PF scenario numbers) */
1834 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1838 /* If all PF's associated scenarios are all 0 or all
1839 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1840 * not been configured yet.
/* All-zero scenario numbers: profile never configured by any PF */
1842 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1843 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1844 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1845 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
/* All slots set to ICE_ACL_INVALID_SCEN: every PF has disassociated */
1848 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1849 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1850 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1851 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1852 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1853 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1854 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1855 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* Otherwise at least one PF still references this profile */
1858 return ICE_ERR_IN_USE;
1862 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1863 * @hw: pointer to the hardware structure
1864 * @acts: array of actions to be performed on a match
1865 * @acts_cnt: number of actions
1867 static enum ice_status
1868 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
/* Walk the action list and deallocate any HW counters that were
 * allocated for counter-type actions.
 */
1873 for (i = 0; i < acts_cnt; i++) {
1874 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1875 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1876 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1877 struct ice_acl_cntrs cntrs = { 0 };
1878 enum ice_status status;
1880 /* amount is unused in the dealloc path but the common
1881 * parameter check routine wants a value set, as zero
1882 * is invalid for the check. Just set it.
1885 cntrs.bank = 0; /* Only bank0 for the moment */
/* NOTE(review): the two LE16_TO_CPU lines presumably assign
 * cntrs.first_cntr and cntrs.last_cntr from the stored
 * counter index - confirm against the full file.
 */
1887 LE16_TO_CPU(acts[i].data.acl_act.value);
1889 LE16_TO_CPU(acts[i].data.acl_act.value);
/* Dual counter for pkt+bytes, single otherwise */
1891 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1892 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1894 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1896 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1905 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1906 * @hw: pointer to the hardware structure
1907 * @prof: pointer to flow profile
1909 * Disassociate the scenario from the profile for the PF of the VSI.
1911 static enum ice_status
1912 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1914 struct ice_aqc_acl_prof_generic_frmt buf;
1915 enum ice_status status = ICE_SUCCESS;
1918 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
/* Resolve SW profile ID to the HW profile ID */
1920 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* Read-modify-write: fetch the current per-PF scenario table */
1924 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1928 /* Clear scenario for this PF */
1929 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
/* Write the updated extraction-sequence/profile data back to HW */
1930 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1936 * ice_flow_rem_entry_sync - Remove a flow entry
1937 * @hw: pointer to the HW struct
1938 * @blk: classification stage
1939 * @entry: flow entry to be removed
1941 static enum ice_status
1942 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1943 struct ice_flow_entry *entry)
1946 return ICE_ERR_BAD_PTR;
/* ACL entries also require removal from the HW scenario and
 * release of any counters attached to the entry's actions.
 */
1948 if (blk == ICE_BLK_ACL) {
1949 enum ice_status status;
1952 return ICE_ERR_BAD_PTR;
1954 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1955 entry->scen_entry_idx);
1959 /* Checks if we need to release an ACL counter. */
1960 if (entry->acts_cnt && entry->acts)
1961 ice_flow_acl_free_act_cntr(hw, entry->acts,
/* Unlink from the profile's entry list, then free all entry memory */
1965 LIST_DEL(&entry->l_entry);
1967 ice_dealloc_flow_entry(hw, entry);
1973 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1974 * @hw: pointer to the HW struct
1975 * @blk: classification stage
1976 * @dir: flow direction
1977 * @prof_id: unique ID to identify this flow profile
1978 * @segs: array of one or more packet segments that describe the flow
1979 * @segs_cnt: number of packet segments provided
1980 * @acts: array of default actions
1981 * @acts_cnt: number of default actions
1982 * @prof: stores the returned flow profile added
1984 * Assumption: the caller has acquired the lock to the profile list
1986 static enum ice_status
1987 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1988 enum ice_flow_dir dir, u64 prof_id,
1989 struct ice_flow_seg_info *segs, u8 segs_cnt,
1990 struct ice_flow_action *acts, u8 acts_cnt,
1991 struct ice_flow_prof **prof)
1993 struct ice_flow_prof_params *params;
1994 enum ice_status status;
/* Actions are optional, but if a count is given the array must exist */
1997 if (!prof || (acts_cnt && !acts))
1998 return ICE_ERR_BAD_PTR;
/* Working parameters are heap-allocated (large structure) */
2000 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2002 return ICE_ERR_NO_MEMORY;
2004 params->prof = (struct ice_flow_prof *)
2005 ice_malloc(hw, sizeof(*params->prof));
2006 if (!params->prof) {
2007 status = ICE_ERR_NO_MEMORY;
2011 /* initialize extraction sequence to all invalid (0xff) */
2012 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2013 params->es[i].prot_id = ICE_PROT_INVALID;
2014 params->es[i].off = ICE_FV_OFFSET_INVAL;
2018 params->prof->id = prof_id;
2019 params->prof->dir = dir;
2020 params->prof->segs_cnt = segs_cnt;
2022 /* Make a copy of the segments that need to be persistent in the flow
2025 for (i = 0; i < segs_cnt; i++)
2026 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2027 ICE_NONDMA_TO_NONDMA);
2029 /* Make a copy of the actions that need to be persistent in the flow
2033 params->prof->acts = (struct ice_flow_action *)
2034 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2035 ICE_NONDMA_TO_NONDMA);
2037 if (!params->prof->acts) {
2038 status = ICE_ERR_NO_MEMORY;
/* Build ptype/attribute/extraction data from the packet segments */
2043 status = ice_flow_proc_segs(hw, params);
2045 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2049 /* Add a HW profile for this flow profile */
2050 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2051 params->attr, params->attr_cnt, params->es,
2054 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
/* Success: hand the initialized profile back to the caller */
2058 INIT_LIST_HEAD(&params->prof->entries);
2059 ice_init_lock(&params->prof->entries_lock);
2060 *prof = params->prof;
/* Error path: release the partially-built profile */
2064 if (params->prof->acts)
2065 ice_free(hw, params->prof->acts);
2066 ice_free(hw, params->prof);
/* The scratch parameter block is always freed */
2069 ice_free(hw, params);
2075 * ice_flow_rem_prof_sync - remove a flow profile
2076 * @hw: pointer to the hardware structure
2077 * @blk: classification stage
2078 * @prof: pointer to flow profile to remove
2080 * Assumption: the caller has acquired the lock to the profile list
2082 static enum ice_status
2083 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2084 struct ice_flow_prof *prof)
2086 enum ice_status status;
2088 /* Remove all remaining flow entries before removing the flow profile */
2089 if (!LIST_EMPTY(&prof->entries)) {
2090 struct ice_flow_entry *e, *t;
/* SAFE iteration: ice_flow_rem_entry_sync unlinks/frees each entry */
2092 ice_acquire_lock(&prof->entries_lock);
2094 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2096 status = ice_flow_rem_entry_sync(hw, blk, e);
2101 ice_release_lock(&prof->entries_lock);
/* ACL profiles need additional HW teardown: scenario disassociation
 * and, when no PF uses the profile anymore, range-checker clearing.
 */
2104 if (blk == ICE_BLK_ACL) {
2105 struct ice_aqc_acl_profile_ranges query_rng_buf;
2106 struct ice_aqc_acl_prof_generic_frmt buf;
2109 /* Disassociate the scenario from the profile for the PF */
2110 status = ice_flow_acl_disassoc_scen(hw, prof);
2114 /* Clear the range-checker if the profile ID is no longer
/* ICE_ERR_IN_USE here means another PF still uses the profile;
 * that is not a failure, just skip the range-checker clear.
 */
2117 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2118 if (status && status != ICE_ERR_IN_USE) {
2120 } else if (!status) {
2121 /* Clear the range-checker value for profile ID */
2122 ice_memset(&query_rng_buf, 0,
2123 sizeof(struct ice_aqc_acl_profile_ranges),
2126 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2131 status = ice_prog_acl_prof_ranges(hw, prof_id,
2132 &query_rng_buf, NULL);
2138 /* Remove all hardware profiles associated with this flow profile */
2139 status = ice_rem_prof(hw, blk, prof->id);
/* Unlink from the SW list and release profile resources */
2141 LIST_DEL(&prof->l_entry);
2142 ice_destroy_lock(&prof->entries_lock);
2144 ice_free(hw, prof->acts);
2152 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2153 * @buf: Destination buffer function writes partial xtrct sequence to
2154 * @info: Info about field
2157 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2158 struct ice_flow_fld_info *info)
/* Source byte index within the field vector: FV word index scaled to
 * bytes plus the byte part of the bit displacement.
 */
2163 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2164 info->xtrct.disp / BITS_PER_BYTE;
2165 dst = info->entry.val;
2166 for (i = 0; i < info->entry.last; i++)
2167 /* HW stores field vector words in LE, convert words back to BE
2168 * so constructed entries will end up in network order
/* XOR with 1 swaps the two bytes within each 16-bit FV word */
2170 buf->byte_selection[dst++] = src++ ^ 1;
2174 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2175 * @hw: pointer to the hardware structure
2176 * @prof: pointer to flow profile
2178 static enum ice_status
2179 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2181 struct ice_aqc_acl_prof_generic_frmt buf;
2182 struct ice_flow_fld_info *info;
2183 enum ice_status status;
2187 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2189 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* ICE_ERR_IN_USE means another PF already programmed the profile;
 * only a real error aborts here.
 */
2193 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2194 if (status && status != ICE_ERR_IN_USE)
2198 /* Program the profile dependent configuration. This is done
2199 * only once regardless of the number of PFs using that profile
2201 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
/* Build byte/word selection from every matched field in each segment */
2203 for (i = 0; i < prof->segs_cnt; i++) {
2204 struct ice_flow_seg_info *seg = &prof->segs[i];
2207 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2208 ICE_FLOW_FIELD_IDX_MAX) {
2209 info = &seg->fields[j];
/* Range-checked fields use word selection; others get a
 * byte-selection extraction sequence.
 */
2211 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2212 buf.word_selection[info->entry.val] =
2215 ice_flow_acl_set_xtrct_seq_fld(&buf,
/* Raw (offset-based) matches also contribute byte selections */
2219 for (j = 0; j < seg->raws_cnt; j++) {
2220 info = &seg->raws[j].info;
2221 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Mark every PF slot invalid first; only this PF is then set */
2225 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2226 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2230 /* Update the current PF */
2231 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2232 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2238 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2239 * @hw: pointer to the hardware structure
2240 * @blk: classification stage
2241 * @vsi_handle: software VSI handle
2242 * @vsig: target VSI group
2244 * Assumption: the caller has already verified that the VSI to
2245 * be added has the same characteristics as the VSIG and will
2246 * thereby have access to all resources added to that VSIG.
2249 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2252 enum ice_status status;
/* Validate the SW handle and classification block before touching HW */
2254 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2255 return ICE_ERR_PARAM;
/* Translate SW handle to HW VSI number under the profile-list lock */
2257 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2258 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2260 ice_release_lock(&hw->fl_profs_locks[blk]);
2266 * ice_flow_assoc_prof - associate a VSI with a flow profile
2267 * @hw: pointer to the hardware structure
2268 * @blk: classification stage
2269 * @prof: pointer to flow profile
2270 * @vsi_handle: software VSI handle
2272 * Assumption: the caller has acquired the lock to the profile list
2273 * and the software VSI handle has been validated
2276 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2277 struct ice_flow_prof *prof, u16 vsi_handle)
2279 enum ice_status status = ICE_SUCCESS;
/* No-op if the VSI is already associated (bit set in prof->vsis) */
2281 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
/* ACL block needs its extraction sequence programmed first */
2282 if (blk == ICE_BLK_ACL) {
2283 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2287 status = ice_add_prof_id_flow(hw, blk,
2288 ice_get_hw_vsi_num(hw,
/* Track the association in the SW bitmap on success */
2292 ice_set_bit(vsi_handle, prof->vsis);
2294 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2302 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2303 * @hw: pointer to the hardware structure
2304 * @blk: classification stage
2305 * @prof: pointer to flow profile
2306 * @vsi_handle: software VSI handle
2308 * Assumption: the caller has acquired the lock to the profile list
2309 * and the software VSI handle has been validated
2311 static enum ice_status
2312 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2313 struct ice_flow_prof *prof, u16 vsi_handle)
2315 enum ice_status status = ICE_SUCCESS;
/* Mirror of ice_flow_assoc_prof: only act if the bit is set */
2317 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2318 status = ice_rem_prof_id_flow(hw, blk,
2319 ice_get_hw_vsi_num(hw,
/* Clear the SW association bitmap on successful HW removal */
2323 ice_clear_bit(vsi_handle, prof->vsis);
2325 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2333 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2334 * @hw: pointer to the HW struct
2335 * @blk: classification stage
2336 * @dir: flow direction
2337 * @prof_id: unique ID to identify this flow profile
2338 * @segs: array of one or more packet segments that describe the flow
2339 * @segs_cnt: number of packet segments provided
2340 * @acts: array of default actions
2341 * @acts_cnt: number of default actions
2342 * @prof: stores the returned flow profile added
2345 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2346 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2347 struct ice_flow_action *acts, u8 acts_cnt,
2348 struct ice_flow_prof **prof)
2350 enum ice_status status;
/* Parameter validation before taking the profile-list lock */
2352 if (segs_cnt > ICE_FLOW_SEG_MAX)
2353 return ICE_ERR_MAX_LIMIT;
2356 return ICE_ERR_PARAM;
2359 return ICE_ERR_BAD_PTR;
/* Verify the protocol header layout of each segment is supported */
2361 status = ice_flow_val_hdrs(segs, segs_cnt);
2365 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2367 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2368 acts, acts_cnt, prof);
/* On success, publish the new profile on the block's profile list */
2370 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2372 ice_release_lock(&hw->fl_profs_locks[blk]);
2378 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2379 * @hw: pointer to the HW struct
2380 * @blk: the block for which the flow profile is to be removed
2381 * @prof_id: unique ID of the flow profile to be removed
2384 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2386 struct ice_flow_prof *prof;
2387 enum ice_status status;
/* Lookup and removal both happen under the block's profile-list lock */
2389 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2391 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2393 status = ICE_ERR_DOES_NOT_EXIST;
2397 /* prof becomes invalid after the call */
2398 status = ice_flow_rem_prof_sync(hw, blk, prof);
2401 ice_release_lock(&hw->fl_profs_locks[blk]);
2407 * ice_flow_find_entry - look for a flow entry using its unique ID
2408 * @hw: pointer to the HW struct
2409 * @blk: classification stage
2410 * @entry_id: unique ID to identify this flow entry
2412 * This function looks for the flow entry with the specified unique ID in all
2413 * flow profiles of the specified classification stage. If the entry is found,
2414 * and it returns the handle to the flow entry. Otherwise, it returns
2415 * ICE_FLOW_ENTRY_ID_INVAL.
2417 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2419 struct ice_flow_entry *found = NULL;
2420 struct ice_flow_prof *p;
/* Two-level search: every profile in the block, every entry in the
 * profile, each entry list guarded by its own lock.
 */
2422 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2424 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2425 struct ice_flow_entry *e;
2427 ice_acquire_lock(&p->entries_lock);
2428 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2429 if (e->id == entry_id) {
2433 ice_release_lock(&p->entries_lock);
2439 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Convert the entry pointer into the caller-visible opaque handle */
2441 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2445 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2446 * @hw: pointer to the hardware structure
2447 * @acts: array of actions to be performed on a match
2448 * @acts_cnt: number of actions
2449 * @cnt_alloc: indicates if an ACL counter has been allocated.
2451 static enum ice_status
2452 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2453 u8 acts_cnt, bool *cnt_alloc)
/* Bitmap indexed by action type, used to reject duplicate actions */
2455 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2458 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2461 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2462 return ICE_ERR_OUT_OF_RANGE;
/* First pass: validate action types and reject duplicates */
2464 for (i = 0; i < acts_cnt; i++) {
2465 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2466 acts[i].type != ICE_FLOW_ACT_DROP &&
2467 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2468 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2471 /* If the caller want to add two actions of the same type, then
2472 * it is considered invalid configuration.
2474 if (ice_test_and_set_bit(acts[i].type, dup_check))
2475 return ICE_ERR_PARAM;
2478 /* Checks if ACL counters are needed. */
/* Second pass: allocate HW counters for counter-type actions and
 * store the allocated index back into the action's value field.
 */
2479 for (i = 0; i < acts_cnt; i++) {
2480 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2481 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2482 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2483 struct ice_acl_cntrs cntrs = { 0 };
2484 enum ice_status status;
2487 cntrs.bank = 0; /* Only bank0 for the moment */
2489 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2490 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2492 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2494 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2497 /* Counter index within the bank */
2498 acts[i].data.acl_act.value =
2499 CPU_TO_LE16(cntrs.first_cntr);
2508 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2509 * @fld: number of the given field
2510 * @info: info about field
2511 * @range_buf: range checker configuration buffer
2512 * @data: pointer to a data buffer containing flow entry's match values/masks
2513 * @range: Input/output param indicating which range checkers are being used
2516 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2517 struct ice_aqc_acl_profile_ranges *range_buf,
2518 u8 *data, u8 *range)
2522 /* If not specified, default mask is all bits in field */
/* NOTE(review): BIT(size) - 1 assumes field size < 16 bits here;
 * ice_flds_info[].size is stored in bits - confirm no 16-bit field
 * reaches this path (shift by 16 on a 16-bit value would overflow).
 */
2523 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2524 BIT(ice_flds_info[fld].size) - 1 :
2525 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2527 /* If the mask is 0, then we don't need to worry about this input
2528 * range checker value.
/* Low/high boundaries come from the entry's val/last match fields,
 * shifted into field-vector alignment.
 */
2532 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2534 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2535 u8 range_idx = info->entry.val;
/* HW expects range-checker boundaries and mask in big-endian */
2537 range_buf->checker_cfg[range_idx].low_boundary =
2538 CPU_TO_BE16(new_low);
2539 range_buf->checker_cfg[range_idx].high_boundary =
2540 CPU_TO_BE16(new_high);
2541 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2543 /* Indicate which range checker is being used */
2544 *range |= BIT(range_idx);
2549 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2550 * @fld: number of the given field
2551 * @info: info about the field
2552 * @buf: buffer containing the entry
2553 * @dontcare: buffer containing don't care mask for entry
2554 * @data: pointer to a data buffer containing flow entry's match values/masks
2557 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2558 u8 *dontcare, u8 *data)
2560 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2561 bool use_mask = false;
/* src/mask are byte offsets into the caller's match-data buffer;
 * dst indexes the entry key relative to the ACL byte-selection base.
 */
2564 src = info->src.val;
2565 mask = info->src.mask;
2566 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* disp is the sub-byte bit displacement of the field in the FV */
2567 disp = info->xtrct.disp % BITS_PER_BYTE;
2569 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying shifted overflow bits from
 * one byte into the next (tmp_s for data, tmp_m for the mask).
 */
2572 for (k = 0; k < info->entry.last; k++, dst++) {
2573 /* Add overflow bits from previous byte */
2574 buf[dst] = (tmp_s & 0xff00) >> 8;
2576 /* If mask is not valid, tmp_m is always zero, so just setting
2577 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2578 * overflow bits of mask from prev byte
2580 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2582 /* If there is displacement, last byte will only contain
2583 * displaced data, but there is no more data to read from user
2584 * buffer, so skip so as not to potentially read beyond end of
2587 if (!disp || k < info->entry.last - 1) {
2588 /* Store shifted data to use in next byte */
2589 tmp_s = data[src++] << disp;
2591 /* Add current (shifted) byte */
2592 buf[dst] |= tmp_s & 0xff;
2594 /* Handle mask if valid */
/* dontcare is the inverse of the user's mask: 1 = ignore bit */
2596 tmp_m = (~data[mask++] & 0xff) << disp;
2597 dontcare[dst] |= tmp_m & 0xff;
2602 /* Fill in don't care bits at beginning of field */
2604 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2605 for (k = 0; k < disp; k++)
2606 dontcare[dst] |= BIT(k);
/* end_disp: bit position where the field ends within its last byte */
2609 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2611 /* Fill in don't care bits at end of field */
2613 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2614 info->entry.last - 1;
2615 for (k = end_disp; k < BITS_PER_BYTE; k++)
2616 dontcare[dst] |= BIT(k);
2621 * ice_flow_acl_frmt_entry - Format ACL entry
2622 * @hw: pointer to the hardware structure
2623 * @prof: pointer to flow profile
2624 * @e: pointer to the flow entry
2625 * @data: pointer to a data buffer containing flow entry's match values/masks
2626 * @acts: array of actions to be performed on a match
2627 * @acts_cnt: number of actions
2629 * Formats the key (and key_inverse) to be matched from the data passed in,
2630 * along with data from the flow profile. This key/key_inverse pair makes up
2631 * the 'entry' for an ACL flow entry.
2633 static enum ice_status
2634 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2635 struct ice_flow_entry *e, u8 *data,
2636 struct ice_flow_action *acts, u8 acts_cnt)
2638 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2639 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2640 enum ice_status status;
2645 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2649 /* Format the result action */
/* Validates actions and may allocate HW counters (cnt_alloc tracks
 * this so they can be released on a later failure).
 */
2651 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2655 status = ICE_ERR_NO_MEMORY;
/* Keep a private copy of the actions on the entry */
2657 e->acts = (struct ice_flow_action *)
2658 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2659 ICE_NONDMA_TO_NONDMA);
2663 e->acts_cnt = acts_cnt;
2665 /* Format the matching data */
2666 buf_sz = prof->cfg.scen->width;
2667 buf = (u8 *)ice_malloc(hw, buf_sz);
2671 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2675 /* 'key' buffer will store both key and key_inverse, so must be twice
2678 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2682 range_buf = (struct ice_aqc_acl_profile_ranges *)
2683 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2687 /* Set don't care mask to all 1's to start, will zero out used bytes */
2688 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Format each matched field of each segment into buf/dontcare;
 * range-type fields go to the range-checker buffer instead.
 */
2690 for (i = 0; i < prof->segs_cnt; i++) {
2691 struct ice_flow_seg_info *seg = &prof->segs[i];
2694 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2695 ICE_FLOW_FIELD_IDX_MAX) {
2696 struct ice_flow_fld_info *info = &seg->fields[j];
2698 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2699 ice_flow_acl_frmt_entry_range(j, info,
2703 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw matches: straight byte copy, optional per-byte mask */
2707 for (j = 0; j < seg->raws_cnt; j++) {
2708 struct ice_flow_fld_info *info = &seg->raws[j].info;
2709 u16 dst, src, mask, k;
2710 bool use_mask = false;
2712 src = info->src.val;
2713 dst = info->entry.val -
2714 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2715 mask = info->src.mask;
2717 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2720 for (k = 0; k < info->entry.last; k++, dst++) {
2721 buf[dst] = data[src++];
2723 dontcare[dst] = ~data[mask++];
/* Stamp the HW profile ID into the key and always match on it */
2730 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2731 dontcare[prof->cfg.scen->pid_idx] = 0;
2733 /* Format the buffer for direction flags */
2734 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2736 if (prof->dir == ICE_FLOW_RX)
2737 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* Record which range checkers are in use; unused ones are don't-care */
2740 buf[prof->cfg.scen->rng_chk_idx] = range;
2741 /* Mark any unused range checkers as don't care */
2742 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
/* Ownership of range_buf transfers to the entry when ranges are used */
2743 e->range_buf = range_buf;
2745 ice_free(hw, range_buf);
/* Build the TCAM key/key_inverse pair from value + don't-care mask */
2748 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2754 e->entry_sz = buf_sz * 2;
/* Cleanup: scratch buffers always freed; on error also undo the
 * range buffer, action copy, and any allocated counters.
 */
2761 ice_free(hw, dontcare);
2766 if (status && range_buf) {
2767 ice_free(hw, range_buf);
2768 e->range_buf = NULL;
2771 if (status && e->acts) {
2772 ice_free(hw, e->acts);
2777 if (status && cnt_alloc)
2778 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2784 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2785 * the compared data.
2786 * @prof: pointer to flow profile
2787 * @e: pointer to the comparing flow entry
2788 * @do_chg_action: decide if we want to change the ACL action
2789 * @do_add_entry: decide if we want to add the new ACL entry
2790 * @do_rem_entry: decide if we want to remove the current ACL entry
2792 * Find an ACL scenario entry that matches the compared data. In the same time,
2793 * this function also figure out:
2794 * a/ If we want to change the ACL action
2795 * b/ If we want to add the new ACL entry
2796 * c/ If we want to remove the current ACL entry
2798 static struct ice_flow_entry *
2799 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2800 struct ice_flow_entry *e, bool *do_chg_action,
2801 bool *do_add_entry, bool *do_rem_entry)
2803 struct ice_flow_entry *p, *return_entry = NULL;
2807 * a/ There exists an entry with same matching data, but different
2808 * priority, then we remove this existing ACL entry. Then, we
2809 * will add the new entry to the ACL scenario.
2810 * b/ There exists an entry with same matching data, priority, and
2811 * result action, then we do nothing
2812 * c/ There exists an entry with same matching data, priority, but
2813 * different, action, then do only change the action's entry.
2814 * d/ Else, we add this new entry to the ACL scenario.
/* Default disposition: add as a brand-new entry */
2816 *do_chg_action = false;
2817 *do_add_entry = true;
2818 *do_rem_entry = false;
2819 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* Non-zero memcmp: different key material, keep scanning */
2820 if (memcmp(p->entry, e->entry, p->entry_sz))
2823 /* From this point, we have the same matching_data. */
2824 *do_add_entry = false;
2827 if (p->priority != e->priority) {
2828 /* matching data && !priority */
2829 *do_add_entry = true;
2830 *do_rem_entry = true;
2834 /* From this point, we will have matching_data && priority */
/* Different action count means the action set differs */
2835 if (p->acts_cnt != e->acts_cnt)
2836 *do_chg_action = true;
/* Otherwise compare each existing action against the new set;
 * any action with no exact counterpart forces an action change.
 */
2837 for (i = 0; i < p->acts_cnt; i++) {
2838 bool found_not_match = false;
2840 for (j = 0; j < e->acts_cnt; j++)
2841 if (memcmp(&p->acts[i], &e->acts[j],
2842 sizeof(struct ice_flow_action))) {
2843 found_not_match = true;
2847 if (found_not_match) {
2848 *do_chg_action = true;
2853 /* (do_chg_action = true) means :
2854 * matching_data && priority && !result_action
2855 * (do_chg_action = false) means :
2856 * matching_data && priority && result_action
2861 return return_entry;
2865 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2868 static enum ice_acl_entry_prio
2869 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2871 enum ice_acl_entry_prio acl_prio;
/* Direct 1:1 mapping from flow priority to ACL entry priority */
2874 case ICE_FLOW_PRIO_LOW:
2875 acl_prio = ICE_ACL_PRIO_LOW;
2877 case ICE_FLOW_PRIO_NORMAL:
2878 acl_prio = ICE_ACL_PRIO_NORMAL;
2880 case ICE_FLOW_PRIO_HIGH:
2881 acl_prio = ICE_ACL_PRIO_HIGH;
/* Unknown values fall back to NORMAL rather than failing */
2884 acl_prio = ICE_ACL_PRIO_NORMAL;
2892 * ice_flow_acl_union_rng_chk - Perform union operation between two
2893 * range-range checker buffers
2894 * @dst_buf: pointer to destination range checker buffer
2895 * @src_buf: pointer to source range checker buffer
2897 * For this function, we do the union between dst_buf and src_buf
2898 * range checker buffer, and we will save the result back to dst_buf
2900 static enum ice_status
2901 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2902 struct ice_aqc_acl_profile_ranges *src_buf)
2906 if (!dst_buf || !src_buf)
2907 return ICE_ERR_BAD_PTR;
/* For every configured source checker, find a destination slot that
 * is either free (mask == 0) or already holds an identical config.
 */
2909 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2910 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2911 bool will_populate = false;
2913 in_data = &src_buf->checker_cfg[i];
2918 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2919 cfg_data = &dst_buf->checker_cfg[j];
/* A zero mask marks an unused slot; an exact duplicate is
 * also acceptable (idempotent copy).
 */
2921 if (!cfg_data->mask ||
2922 !memcmp(cfg_data, in_data,
2923 sizeof(struct ice_acl_rng_data))) {
2924 will_populate = true;
2929 if (will_populate) {
2930 ice_memcpy(cfg_data, in_data,
2931 sizeof(struct ice_acl_rng_data),
2932 ICE_NONDMA_TO_NONDMA);
2934 /* No available slot left to program range checker */
2935 return ICE_ERR_MAX_LIMIT;
2943 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2944 * @hw: pointer to the hardware structure
2945 * @prof: pointer to flow profile
2946 * @entry: double pointer to the flow entry
2948 * For this function, we will look at the current added entries in the
2949 * corresponding ACL scenario. Then, we will perform matching logic to
2950 * see if we want to add/modify/do nothing with this new entry.
2952 static enum ice_status
2953 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2954 struct ice_flow_entry **entry)
2956 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2957 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2958 struct ice_acl_act_entry *acts = NULL;
2959 struct ice_flow_entry *exist;
2960 enum ice_status status = ICE_SUCCESS;
2961 struct ice_flow_entry *e;
2964 if (!entry || !(*entry) || !prof)
2965 return ICE_ERR_BAD_PTR;
2969 do_chg_rng_chk = false;
/* Range-checker handling: merge this entry's ranges into the
 * profile's current HW configuration and reprogram only on change.
 */
2973 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2978 /* Query the current range-checker value in FW */
2979 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2983 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2984 sizeof(struct ice_aqc_acl_profile_ranges),
2985 ICE_NONDMA_TO_NONDMA);
2987 /* Generate the new range-checker value */
2988 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2992 /* Reconfigure the range check if the buffer is changed. */
2993 do_chg_rng_chk = false;
2994 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2995 sizeof(struct ice_aqc_acl_profile_ranges))) {
2996 status = ice_prog_acl_prof_ranges(hw, prof_id,
2997 &cfg_rng_buf, NULL);
3001 do_chg_rng_chk = true;
3005 /* Figure out if we want to (change the ACL action) and/or
3006 * (Add the new ACL entry) and/or (Remove the current ACL entry)
3008 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3009 &do_add_entry, &do_rem_entry);
/* Remove-then-add path for same key with different priority */
3011 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3016 /* Prepare the result action buffer */
/* NOTE(review): sized by e->entry_sz here rather than e->acts_cnt -
 * looks oversized (entry_sz is the key byte length); verify intent
 * against the upstream file.
 */
3017 acts = (struct ice_acl_act_entry *)
3018 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3020 return ICE_ERR_NO_MEMORY;
3022 for (i = 0; i < e->acts_cnt; i++)
3023 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3024 sizeof(struct ice_acl_act_entry),
3025 ICE_NONDMA_TO_NONDMA);
/* Add path: program key + inverse key + actions into the scenario */
3028 enum ice_acl_entry_prio prio;
3032 keys = (u8 *)e->entry;
/* Second half of the entry buffer holds the inverse key */
3033 inverts = keys + (e->entry_sz / 2);
3034 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3036 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3037 inverts, acts, e->acts_cnt,
3042 e->scen_entry_idx = entry_idx;
3043 LIST_ADD(&e->l_entry, &prof->entries);
3045 if (do_chg_action) {
3046 /* For the action memory info, update the SW's copy of
3047 * exist entry with e's action memory info
3049 ice_free(hw, exist->acts);
3050 exist->acts_cnt = e->acts_cnt;
3051 exist->acts = (struct ice_flow_action *)
3052 ice_calloc(hw, exist->acts_cnt,
3053 sizeof(struct ice_flow_action));
3055 status = ICE_ERR_NO_MEMORY;
3059 ice_memcpy(exist->acts, e->acts,
3060 sizeof(struct ice_flow_action) * e->acts_cnt,
3061 ICE_NONDMA_TO_NONDMA);
/* Reprogram only the action memory of the existing HW entry */
3063 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3065 exist->scen_entry_idx);
3070 if (do_chg_rng_chk) {
3071 /* In this case, we want to update the range checker
3072 * information of the exist entry
3074 status = ice_flow_acl_union_rng_chk(exist->range_buf,
3080 /* As we don't add the new entry to our SW DB, deallocate its
3081 * memories, and return the exist entry to the caller
3083 ice_dealloc_flow_entry(hw, e);
3093 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3094 * @hw: pointer to the hardware structure
3095 * @prof: pointer to flow profile
3096 * @e: double pointer to the flow entry
3098 static enum ice_status
3099 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3100 struct ice_flow_entry **e)
3102 enum ice_status status;
/* Thin locking wrapper: serialize scenario updates by holding the
 * profile's entries_lock around the unlocked sync helper.
 */
3104 ice_acquire_lock(&prof->entries_lock);
3105 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3106 ice_release_lock(&prof->entries_lock);
3112 * ice_flow_add_entry - Add a flow entry
3113 * @hw: pointer to the HW struct
3114 * @blk: classification stage
3115 * @prof_id: ID of the profile to add a new flow entry to
3116 * @entry_id: unique ID to identify this flow entry
3117 * @vsi_handle: software VSI handle for the flow entry
3118 * @prio: priority of the flow entry
3119 * @data: pointer to a data buffer containing flow entry's match values/masks
3120 * @acts: arrays of actions to be performed on a match
3121 * @acts_cnt: number of actions
3122 * @entry_h: pointer to buffer that receives the new flow entry's handle
3125 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3126 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3127 void *data, struct ice_flow_action *acts, u8 acts_cnt,
3130 struct ice_flow_entry *e = NULL;
3131 struct ice_flow_prof *prof;
3132 enum ice_status status = ICE_SUCCESS;
3134 /* ACL entries must indicate an action */
3135 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3136 return ICE_ERR_PARAM;
3138 /* No flow entry data is expected for RSS */
3139 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3140 return ICE_ERR_BAD_PTR;
3142 if (!ice_is_vsi_valid(hw, vsi_handle))
3143 return ICE_ERR_PARAM;
/* Profile lookup and VSI association run under the block's
 * profile-list lock.
 */
3145 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3147 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3149 status = ICE_ERR_DOES_NOT_EXIST;
3151 /* Allocate memory for the entry being added and associate
3152 * the VSI to the found flow profile
3154 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3156 status = ICE_ERR_NO_MEMORY;
3158 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3161 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Populate the SW entry (remaining field assignments such as id,
 * priority, acts are not visible in this listing).
 */
3166 e->vsi_handle = vsi_handle;
3175 /* ACL will handle the entry management */
3176 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3181 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Blocks other than ACL/RSS are not supported by this path */
3187 status = ICE_ERR_NOT_IMPL;
3191 if (blk != ICE_BLK_ACL) {
3192 /* ACL will handle the entry management */
3193 ice_acquire_lock(&prof->entries_lock);
3194 LIST_ADD(&e->l_entry, &prof->entries);
3195 ice_release_lock(&prof->entries_lock);
/* Return the entry pointer to the caller as an opaque handle */
3198 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* Error-cleanup path (label not visible in this listing): release the
 * partially-built entry's buffers.
 */
3203 ice_free(hw, e->entry);
3211 * ice_flow_rem_entry - Remove a flow entry
3212 * @hw: pointer to the HW struct
3213 * @blk: classification stage
3214 * @entry_h: handle to the flow entry to be removed
3216 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3219 struct ice_flow_entry *entry;
3220 struct ice_flow_prof *prof;
3221 enum ice_status status = ICE_SUCCESS;
3223 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3224 return ICE_ERR_PARAM;
/* The handle is the entry's pointer value; convert it back */
3226 entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3228 /* Retain the pointer to the flow profile as the entry will be freed */
/* Removal must hold the owning profile's entries_lock; the profile
 * pointer saved above stays valid after the entry is freed.
 */
3232 ice_acquire_lock(&prof->entries_lock);
3233 status = ice_flow_rem_entry_sync(hw, blk, entry);
3234 ice_release_lock(&prof->entries_lock);
3241 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3242 * @seg: packet segment the field being set belongs to
3243 * @fld: field to be set
3244 * @field_type: type of the field
3245 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3246 * entry's input buffer
3247 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3249 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3250 * entry's input buffer
3252 * This helper function stores information of a field being matched, including
3253 * the type of the field and the locations of the value to match, the mask, and
3254 * the upper-bound value in the start of the input buffer for a flow entry.
3255 * This function should only be used for fixed-size data structures.
3257 * This function also opportunistically determines the protocol headers to be
3258 * present based on the fields being set. Some fields cannot be used alone to
3259 * determine the protocol headers present. Sometimes, fields for particular
3260 * protocol headers are not matched. In those cases, the protocol headers
3261 * must be explicitly set.
3264 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3265 enum ice_flow_fld_match_type field_type, u16 val_loc,
3266 u16 mask_loc, u16 last_loc)
3268 u64 bit = BIT_ULL(fld);
/* Range-matched fields get extra bookkeeping; the statement under this
 * condition is not visible in this listing (presumably a range bitmap
 * update using @bit).
 */
3271 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the match type and the input-buffer offsets for the value,
 * mask, and last/upper locations of this field.
 */
3274 seg->fields[fld].type = field_type;
3275 seg->fields[fld].src.val = val_loc;
3276 seg->fields[fld].src.mask = mask_loc;
3277 seg->fields[fld].src.last = last_loc;
/* Opportunistically mark the protocol header implied by this field */
3279 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3283 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3284 * @seg: packet segment the field being set belongs to
3285 * @fld: field to be set
3286 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3287 * entry's input buffer
3288 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3290 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3291 * entry's input buffer
3292 * @range: indicate if field being matched is to be in a range
3294 * This function specifies the locations, in the form of byte offsets from the
3295 * start of the input buffer for a flow entry, from where the value to match,
3296 * the mask value, and upper value can be extracted. These locations are then
3297 * stored in the flow profile. When adding a flow entry associated with the
3298 * flow profile, these locations will be used to quickly extract the values and
3299 * create the content of a match entry. This function should only be used for
3300 * fixed-size data structures.
3303 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3304 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Map the boolean flag onto the internal match-type enum */
3306 enum ice_flow_fld_match_type t = range ?
3307 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3309 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3313 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3314 * @seg: packet segment the field being set belongs to
3315 * @fld: field to be set
3316 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3317 * entry's input buffer
3318 * @pref_loc: location of prefix value from entry's input buffer
3319 * @pref_sz: size of the location holding the prefix value
3321 * This function specifies the locations, in the form of byte offsets from the
3322 * start of the input buffer for a flow entry, from where the value to match
3323 * and the IPv4 prefix value can be extracted. These locations are then stored
3324 * in the flow profile. When adding flow entries to the associated flow profile,
3325 * these locations can be used to quickly extract the values to create the
3326 * content of a match entry. This function should only be used for fixed-size
3330 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3331 u16 val_loc, u16 pref_loc, u8 pref_sz)
3333 /* For this type of field, the "mask" location is for the prefix value's
3334 * location and the "last" location is for the size of the location of
3337 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3338 pref_loc, (u16)pref_sz);
3342 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3343 * @seg: packet segment the field being set belongs to
3344 * @off: offset of the raw field from the beginning of the segment in bytes
3345 * @len: length of the raw pattern to be matched
3346 * @val_loc: location of the value to match from entry's input buffer
3347 * @mask_loc: location of mask value from entry's input buffer
3349 * This function specifies the offset of the raw field to be match from the
3350 * beginning of the specified packet segment, and the locations, in the form of
3351 * byte offsets from the start of the input buffer for a flow entry, from where
3352 * the value to match and the mask value to be extracted. These locations are
3353 * then stored in the flow profile. When adding flow entries to the associated
3354 * flow profile, these locations can be used to quickly extract the values to
3355 * create the content of a match entry. This function should only be used for
3356 * fixed-size data structures.
3359 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3360 u16 val_loc, u16 mask_loc)
/* Append one raw-field descriptor, bounded by the fixed-size array */
3362 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3363 seg->raws[seg->raws_cnt].off = off;
3364 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3365 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3366 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3367 /* The "last" field is used to store the length of the field */
3368 seg->raws[seg->raws_cnt].info.src.last = len;
3371 /* Overflows of "raws" will be handled as an error condition later in
3372 * the flow when this information is processed.
3378 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3379 * @hw: pointer to the hardware structure
3380 * @blk: classification stage
3381 * @vsi_handle: software VSI handle
3382 * @prof_id: unique ID to identify this flow profile
3384 * This function removes the flow entries associated to the input
3385 * vsi handle and disassociates the vsi from the flow profile.
3387 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3390 struct ice_flow_prof *prof = NULL;
3391 enum ice_status status = ICE_SUCCESS;
3393 if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3394 return ICE_ERR_PARAM;
3396 /* find flow profile pointer with input package block and profile id */
/* NOTE(review): the lookup hard-codes ICE_BLK_FD although @blk is a
 * parameter and the entry-removal/disassociation below use @blk —
 * confirm this helper is intentionally FD-only or should pass @blk.
 */
3397 prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3399 ice_debug(hw, ICE_DBG_PKG,
3400 "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3401 return ICE_ERR_DOES_NOT_EXIST;
3404 /* Remove all remaining flow entries before removing the flow profile */
3405 if (!LIST_EMPTY(&prof->entries)) {
3406 struct ice_flow_entry *e, *t;
3408 ice_acquire_lock(&prof->entries_lock);
/* Only entries belonging to the target VSI are removed */
3409 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3411 if (e->vsi_handle != vsi_handle)
3414 status = ice_flow_rem_entry_sync(hw, blk, e);
3418 ice_release_lock(&prof->entries_lock);
3423 /* disassociate the flow profile from sw vsi handle */
3424 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3426 ice_debug(hw, ICE_DBG_PKG,
3427 "ice_flow_disassoc_prof() failed with status=%d\n",
/* Packet-segment header bits the RSS configuration code understands,
 * grouped per protocol layer; VAL_MASKS is the union of all three and
 * is used to validate requested segment headers.
 */
3432 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3433 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3435 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3436 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3438 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3439 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3441 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3442 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3443 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3444 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3447 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3448 * @segs: pointer to the flow field segment(s)
3449 * @seg_cnt: segment count
3450 * @cfg: configure parameters
3452 * Helper function to extract fields from hash bitmap and use flow
3453 * header value to set flow field segment for further use in flow
3454 * profile entry or removal.
3456 static enum ice_status
3457 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3458 const struct ice_rss_hash_cfg *cfg)
3460 struct ice_flow_seg_info *seg;
3464 /* set inner most segment */
3465 seg = &segs[seg_cnt - 1];
/* Mark every hashed field on the innermost segment; locations are
 * invalid since RSS matches on headers, not on entry data.
 */
3467 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3468 ICE_FLOW_FIELD_IDX_MAX)
3469 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3470 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3471 ICE_FLOW_FLD_OFF_INVAL, false);
3473 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3475 /* set outer most header */
3476 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3477 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3478 ICE_FLOW_SEG_HDR_IPV_FRAG |
3479 ICE_FLOW_SEG_HDR_IPV_OTHER;
3480 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3481 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3482 ICE_FLOW_SEG_HDR_IPV_FRAG |
3483 ICE_FLOW_SEG_HDR_IPV_OTHER;
/* Reject header bits outside the supported RSS set */
3485 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3486 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3487 ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3488 return ICE_ERR_PARAM;
/* At most one L3 header may be selected (pow2 => single bit set) */
3490 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3491 if (val && !ice_is_pow2(val))
/* Likewise at most one L4 header */
3494 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3495 if (val && !ice_is_pow2(val))
3502 * ice_rem_vsi_rss_list - remove VSI from RSS list
3503 * @hw: pointer to the hardware structure
3504 * @vsi_handle: software VSI handle
3506 * Remove the VSI from all RSS configurations in the list.
3508 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3510 struct ice_rss_cfg *r, *tmp;
3512 if (LIST_EMPTY(&hw->rss_list_head))
3515 ice_acquire_lock(&hw->rss_locks);
/* Clear this VSI's bit in every config; a config left with no VSIs is
 * unlinked (and presumably freed — the free is not visible in this
 * listing).
 */
3516 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3517 ice_rss_cfg, l_entry)
3518 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3519 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3520 LIST_DEL(&r->l_entry);
3523 ice_release_lock(&hw->rss_locks);
3527 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3528 * @hw: pointer to the hardware structure
3529 * @vsi_handle: software VSI handle
3531 * This function will iterate through all flow profiles and disassociate
3532 * the VSI from that profile. If the flow profile has no VSIs it will
3535 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3537 const enum ice_block blk = ICE_BLK_RSS;
3538 struct ice_flow_prof *p, *t;
3539 enum ice_status status = ICE_SUCCESS;
3541 if (!ice_is_vsi_valid(hw, vsi_handle))
3542 return ICE_ERR_PARAM;
/* Nothing to do when no RSS profiles exist */
3544 if (LIST_EMPTY(&hw->fl_profs[blk]))
3547 ice_acquire_lock(&hw->rss_locks);
3548 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3550 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3551 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Delete the profile once its last VSI is gone */
3555 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3556 status = ice_flow_rem_prof(hw, blk, p->id);
3561 ice_release_lock(&hw->rss_locks);
3567 * ice_get_rss_hdr_type - get a RSS profile's header type
3568 * @prof: RSS flow profile
3570 static enum ice_rss_cfg_hdr_type
3571 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3573 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
/* Single-segment profiles describe outer headers only */
3575 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3576 hdr_type = ICE_RSS_OUTER_HEADERS;
3577 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
/* Note: not an else-if chain — the later IPV4/IPV6 checks
 * deliberately override the plain INNER_HEADERS classification
 * when the outer segment constrains the outer IP version.
 */
3578 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3579 hdr_type = ICE_RSS_INNER_HEADERS;
3580 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3581 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3582 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3583 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3590 * ice_rem_rss_list - remove RSS configuration from list
3591 * @hw: pointer to the hardware structure
3592 * @vsi_handle: software VSI handle
3593 * @prof: pointer to flow profile
3595 * Assumption: lock has already been acquired for RSS list
3598 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3600 enum ice_rss_cfg_hdr_type hdr_type;
3601 struct ice_rss_cfg *r, *tmp;
3603 /* Search for RSS hash fields associated to the VSI that match the
3604 * hash configurations associated to the flow profile. If found
3605 * remove from the RSS entry list of the VSI context and delete entry.
3607 hdr_type = ice_get_rss_hdr_type(prof);
/* A config matches when hash fields, additional headers and header
 * type all equal the profile's innermost segment.
 */
3608 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3609 ice_rss_cfg, l_entry)
3610 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3611 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3612 r->hash.hdr_type == hdr_type) {
3613 ice_clear_bit(vsi_handle, r->vsis);
/* Unlink the config once no VSI references it */
3614 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3615 LIST_DEL(&r->l_entry);
3623 * ice_add_rss_list - add RSS configuration to list
3624 * @hw: pointer to the hardware structure
3625 * @vsi_handle: software VSI handle
3626 * @prof: pointer to flow profile
3628 * Assumption: lock has already been acquired for RSS list
3630 static enum ice_status
3631 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3633 enum ice_rss_cfg_hdr_type hdr_type;
3634 struct ice_rss_cfg *r, *rss_cfg;
3636 hdr_type = ice_get_rss_hdr_type(prof);
/* If an identical config already exists, just tag this VSI onto it */
3637 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3638 ice_rss_cfg, l_entry)
3639 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3640 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3641 r->hash.hdr_type == hdr_type) {
3642 ice_set_bit(vsi_handle, r->vsis);
/* Otherwise build a new list node from the profile's innermost segment */
3646 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3648 return ICE_ERR_NO_MEMORY;
3650 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3651 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3652 rss_cfg->hash.hdr_type = hdr_type;
3653 rss_cfg->hash.symm = prof->cfg.symm;
3654 ice_set_bit(vsi_handle, rss_cfg->vsis);
3656 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Field positions/masks used to pack a 64-bit RSS flow profile ID */
3661 #define ICE_FLOW_PROF_HASH_S 0
3662 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3663 #define ICE_FLOW_PROF_HDR_S 32
3664 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3665 #define ICE_FLOW_PROF_ENCAP_S 62
3666 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3668 /* Flow profile ID format:
3669 * [0:31] - Packet match fields
3670 * [32:61] - Protocol header
3671 * [62:63] - Encapsulation flag:
3674 * 2 for tunneled with outer ipv4
3675 * 3 for tunneled with outer ipv6
3677 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3678 ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3679 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3680 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
/* Program one byte of the GLQF_HSYMM symmetric-hash table for @prof_id:
 * make field-vector word @src XOR with field-vector word @dst (the 0x80
 * bit enables the remap for that byte lane).
 */
3683 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3685 u32 s = ((src % 4) << 3); /* byte shift */
3686 u32 v = dst | 0x80; /* value to program */
3687 u8 i = src / 4; /* register index */
/* Read-modify-write only @src's byte lane within GLQF_HSYMM[i] */
3690 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3691 reg = (reg & ~(0xff << s)) | (v << s);
3692 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Configure symmetric hashing by XOR-pairing @len consecutive
 * field-vector words starting at @src with the ones starting at @dst,
 * programming both directions so src^dst == dst^src.
 */
3696 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3699 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3702 for (i = 0; i < len; i++) {
3703 ice_rss_config_xor_word(hw, prof_id,
3704 /* Yes, field vector in GLQF_HSYMM and
3705 * GLQF_HINSET is inversed!
3707 fv_last_word - (src + i),
3708 fv_last_word - (dst + i));
3709 ice_rss_config_xor_word(hw, prof_id,
3710 fv_last_word - (dst + i),
3711 fv_last_word - (src + i));
/* Reprogram the symmetric-hash (XOR) registers for an RSS flow profile:
 * clear all GLQF_HSYMM words, then, if the profile requests symmetric
 * hashing, XOR-pair the source/destination fields that were actually
 * extracted into the field vector.
 */
3716 ice_rss_update_symm(struct ice_hw *hw,
3717 struct ice_flow_prof *prof)
3719 struct ice_prof_map *map;
/* Translate the SW profile ID to the HW profile ID via the prof map
 * (lookup done under the RSS block's prof_map_lock).
 */
3722 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3723 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3725 prof_id = map->prof_id;
3726 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3729 /* clear to default */
3730 for (m = 0; m < 6; m++)
3731 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3732 if (prof->cfg.symm) {
3733 struct ice_flow_seg_info *seg =
3734 &prof->segs[prof->segs_cnt - 1];
3736 struct ice_flow_seg_xtrct *ipv4_src =
3737 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3738 struct ice_flow_seg_xtrct *ipv4_dst =
3739 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3740 struct ice_flow_seg_xtrct *ipv6_src =
3741 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3742 struct ice_flow_seg_xtrct *ipv6_dst =
3743 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3745 struct ice_flow_seg_xtrct *tcp_src =
3746 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3747 struct ice_flow_seg_xtrct *tcp_dst =
3748 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3750 struct ice_flow_seg_xtrct *udp_src =
3751 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3752 struct ice_flow_seg_xtrct *udp_dst =
3753 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3755 struct ice_flow_seg_xtrct *sctp_src =
3756 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3757 struct ice_flow_seg_xtrct *sctp_dst =
3758 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* prot_id == 0 means the field was not extracted; only pair
 * fields present in the field vector. Widths: IPv4 addr = 2
 * words, IPv6 addr = 8 words, L4 port = 1 word.
 */
3761 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3762 ice_rss_config_xor(hw, prof_id,
3763 ipv4_src->idx, ipv4_dst->idx, 2);
3766 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3767 ice_rss_config_xor(hw, prof_id,
3768 ipv6_src->idx, ipv6_dst->idx, 8);
3771 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3772 ice_rss_config_xor(hw, prof_id,
3773 tcp_src->idx, tcp_dst->idx, 1);
3776 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3777 ice_rss_config_xor(hw, prof_id,
3778 udp_src->idx, udp_dst->idx, 1);
3781 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3782 ice_rss_config_xor(hw, prof_id,
3783 sctp_src->idx, sctp_dst->idx, 1);
3788 * ice_add_rss_cfg_sync - add an RSS configuration
3789 * @hw: pointer to the hardware structure
3790 * @vsi_handle: software VSI handle
3791 * @cfg: configure parameters
3793 * Assumption: lock has already been acquired for RSS list
3795 static enum ice_status
3796 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3797 const struct ice_rss_hash_cfg *cfg)
3799 const enum ice_block blk = ICE_BLK_RSS;
3800 struct ice_flow_prof *prof = NULL;
3801 struct ice_flow_seg_info *segs;
3802 enum ice_status status;
/* Outer-only configs need one segment; tunneled configs need both
 * an outer and an inner segment.
 */
3805 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3806 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3808 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3811 return ICE_ERR_NO_MEMORY;
3813 /* Construct the packet segment info from the hashed fields */
3814 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3818 /* Search for a flow profile that has matching headers, hash fields
3819 * and has the input VSI associated to it. If found, no further
3820 * operations required and exit.
3822 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3824 ICE_FLOW_FIND_PROF_CHK_FLDS |
3825 ICE_FLOW_FIND_PROF_CHK_VSI)
3827 if (prof->cfg.symm == cfg->symm)
/* Same profile, different symmetric setting: just flip the
 * flag and reprogram the HSYMM registers (below).
 */
3829 prof->cfg.symm = cfg->symm;
3833 /* Check if a flow profile exists with the same protocol headers and
3834 * associated with the input VSI. If so disassociate the VSI from
3835 * this profile. The VSI will be added to a new profile created with
3836 * the protocol header and new hash field configuration.
3838 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3839 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3841 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3843 ice_rem_rss_list(hw, vsi_handle, prof);
3847 /* Remove profile if it has no VSIs associated */
3848 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3849 status = ice_flow_rem_prof(hw, blk, prof->id);
3855 /* Search for a profile that has same match fields only. If this
3856 * exists then associate the VSI to this profile.
3858 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3860 ICE_FLOW_FIND_PROF_CHK_FLDS);
3862 if (prof->cfg.symm == cfg->symm) {
3863 status = ice_flow_assoc_prof(hw, blk, prof,
3866 status = ice_add_rss_list(hw, vsi_handle,
3869 /* if a profile exist but with different symmetric
3870 * requirement, just return error.
3872 status = ICE_ERR_NOT_SUPPORTED;
3877 /* Create a new flow profile with generated profile and packet
3878 * segment information.
3880 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3881 ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3882 segs[segs_cnt - 1].hdrs,
3884 segs, segs_cnt, NULL, 0, &prof);
3888 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3889 /* If association to a new flow profile failed then this profile can
3893 ice_flow_rem_prof(hw, blk, prof->id);
/* Track the new config in the SW RSS list, then program symmetric
 * hashing to match the requested setting.
 */
3897 status = ice_add_rss_list(hw, vsi_handle, prof);
3899 prof->cfg.symm = cfg->symm;
3901 ice_rss_update_symm(hw, prof);
3909 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3910 * @hw: pointer to the hardware structure
3911 * @vsi_handle: software VSI handle
3912 * @cfg: configure parameters
3914 * This function will generate a flow profile based on fields associated with
3915 * the input fields to hash on, the flow type and use the VSI number to add
3916 * a flow entry to the profile.
3919 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3920 const struct ice_rss_hash_cfg *cfg)
3922 struct ice_rss_hash_cfg local_cfg;
3923 enum ice_status status;
3925 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3926 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3927 cfg->hash_flds == ICE_HASH_INVALID)
3928 return ICE_ERR_PARAM;
/* local_cfg is presumably initialized from *cfg here (the copy is not
 * visible in this listing) so hdr_type can be rewritten per pass.
 */
3931 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
/* Specific header type: apply the config once as given */
3932 ice_acquire_lock(&hw->rss_locks);
3933 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3934 ice_release_lock(&hw->rss_locks);
/* ICE_RSS_ANY_HEADERS: apply to the outer headers first, then
 * (if that succeeded) to the inner headers as well.
 */
3936 ice_acquire_lock(&hw->rss_locks);
3937 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3938 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3940 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3941 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3944 ice_release_lock(&hw->rss_locks);
3951 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3952 * @hw: pointer to the hardware structure
3953 * @vsi_handle: software VSI handle
3954 * @cfg: configure parameters
3956 * Assumption: lock has already been acquired for RSS list
3958 static enum ice_status
3959 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3960 const struct ice_rss_hash_cfg *cfg)
3962 const enum ice_block blk = ICE_BLK_RSS;
3963 struct ice_flow_seg_info *segs;
3964 struct ice_flow_prof *prof;
3965 enum ice_status status;
/* Mirror of ice_add_rss_cfg_sync: rebuild the same segment layout so
 * the matching profile can be located.
 */
3968 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3969 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3970 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3973 return ICE_ERR_NO_MEMORY;
3975 /* Construct the packet segment info from the hashed fields */
3976 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3980 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3982 ICE_FLOW_FIND_PROF_CHK_FLDS);
3984 status = ICE_ERR_DOES_NOT_EXIST;
3988 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3992 /* Remove RSS configuration from VSI context before deleting
3995 ice_rem_rss_list(hw, vsi_handle, prof);
/* Delete the profile once no VSI references it */
3997 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3998 status = ice_flow_rem_prof(hw, blk, prof->id);
4006 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4007 * @hw: pointer to the hardware structure
4008 * @vsi_handle: software VSI handle
4009 * @cfg: configure parameters
4011 * This function will lookup the flow profile based on the input
4012 * hash field bitmap, iterate through the profile entry list of
4013 * that profile and find entry associated with input VSI to be
4014 * removed. Calls are made to underlying flow apis which will in
4015 * turn build or update buffers for RSS XLT1 section.
4018 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4019 const struct ice_rss_hash_cfg *cfg)
4021 struct ice_rss_hash_cfg local_cfg;
4022 enum ice_status status;
4024 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4025 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4026 cfg->hash_flds == ICE_HASH_INVALID)
4027 return ICE_ERR_PARAM;
4029 ice_acquire_lock(&hw->rss_locks);
/* local_cfg is presumably copied from *cfg here (copy not visible in
 * this listing); mirrors ice_add_rss_cfg's specific-vs-ANY split.
 */
4031 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4032 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
/* ICE_RSS_ANY_HEADERS: remove the outer-header config first,
 * then the inner-header one.
 */
4034 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4035 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4038 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4039 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4043 ice_release_lock(&hw->rss_locks);
4049 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4050 * @hw: pointer to the hardware structure
4051 * @vsi_handle: software VSI handle
4053 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4055 enum ice_status status = ICE_SUCCESS;
4056 struct ice_rss_cfg *r;
4058 if (!ice_is_vsi_valid(hw, vsi_handle))
4059 return ICE_ERR_PARAM;
/* Re-apply every saved RSS config that references this VSI (used after
 * reset, when HW state must be rebuilt from the SW list).
 */
4061 ice_acquire_lock(&hw->rss_locks);
4062 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4063 ice_rss_cfg, l_entry) {
4064 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4065 status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4070 ice_release_lock(&hw->rss_locks);
4076 * ice_get_rss_cfg - returns hashed fields for the given header types
4077 * @hw: pointer to the hardware structure
4078 * @vsi_handle: software VSI handle
4079 * @hdrs: protocol header type
4081 * This function will return the match fields of the first instance of flow
4082 * profile having the given header types and containing input VSI
4084 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4086 u64 rss_hash = ICE_HASH_INVALID;
4087 struct ice_rss_cfg *r;
4089 /* verify if the protocol header is non zero and VSI is valid */
4090 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4091 return ICE_HASH_INVALID;
4093 ice_acquire_lock(&hw->rss_locks);
4094 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4095 ice_rss_cfg, l_entry)
4096 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4097 r->hash.addl_hdrs == hdrs) {
4098 rss_hash = r->hash.hash_flds;
4101 ice_release_lock(&hw->rss_locks);