1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_common.h"
8 /* Size of known protocol header fields, in bytes.
 * These byte widths are converted to bit widths by the
 * ICE_FLOW_FLD_INFO{,_MSK} initializers below (see the
 * "* BITS_PER_BYTE" conversion of .size).
 */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2
10 #define ICE_FLOW_FLD_SZ_VLAN 2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
/* Partial IPv6 addresses: 32-, 48- and 64-bit prefixes (4/6/8 bytes) */
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
/* Single-byte IP header fields (DSCP, TTL/hop limit, protocol/next hdr) */
16 #define ICE_FLOW_FLD_SZ_IP_DSCP 1
17 #define ICE_FLOW_FLD_SZ_IP_TTL 1
18 #define ICE_FLOW_FLD_SZ_IP_PROT 1
19 #define ICE_FLOW_FLD_SZ_PORT 2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER 2
/* Tunnel/encapsulation protocol identifiers */
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID 4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI 2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI 4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
33 #define ICE_FLOW_FLD_SZ_VXLAN_VNI 4
34 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
36 /* Describe properties of a protocol header field */
37 struct ice_flow_field_info {
38 enum ice_flow_seg_hdr hdr;
39 s16 off; /* Offset from start of a protocol header, in bits */
40 u16 size; /* Size of fields in bits */
41 u16 mask; /* 16-bit mask for field */
44 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
46 .off = (_offset_bytes) * BITS_PER_BYTE, \
47 .size = (_size_bytes) * BITS_PER_BYTE, \
51 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
53 .off = (_offset_bytes) * BITS_PER_BYTE, \
54 .size = (_size_bytes) * BITS_PER_BYTE, \
58 /* Table containing properties of supported protocol header fields */
60 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
62 /* ICE_FLOW_FIELD_IDX_ETH_DA */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
64 /* ICE_FLOW_FIELD_IDX_ETH_SA */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
66 /* ICE_FLOW_FIELD_IDX_S_VLAN */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
68 /* ICE_FLOW_FIELD_IDX_C_VLAN */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
70 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
73 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
74 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
77 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
79 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
80 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
81 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
82 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
83 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
84 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
85 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
86 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
87 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
88 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
89 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
90 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
91 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
101 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
102 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
103 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
104 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
105 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
107 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
108 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
109 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
110 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
111 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
112 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
113 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
114 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
115 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
116 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
118 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
120 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
122 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
124 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
126 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
130 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
131 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
133 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
134 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
137 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
138 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
139 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
141 /* ICE_FLOW_FIELD_IDX_ARP_OP */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
144 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
146 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
147 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
149 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
150 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
152 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
154 ICE_FLOW_FLD_SZ_GTP_TEID),
155 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
156 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
157 ICE_FLOW_FLD_SZ_GTP_TEID),
158 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
159 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
160 ICE_FLOW_FLD_SZ_GTP_TEID),
161 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
162 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
163 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
164 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
165 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
166 ICE_FLOW_FLD_SZ_GTP_TEID),
167 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
168 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
169 ICE_FLOW_FLD_SZ_GTP_TEID),
171 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
172 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
173 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
175 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
176 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
177 ICE_FLOW_FLD_SZ_PFCP_SEID),
179 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
181 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
183 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
184 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
185 ICE_FLOW_FLD_SZ_ESP_SPI),
187 /* ICE_FLOW_FIELD_IDX_AH_SPI */
188 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
189 ICE_FLOW_FLD_SZ_AH_SPI),
191 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
192 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
193 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
195 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
196 ICE_FLOW_FLD_SZ_VXLAN_VNI),
198 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
199 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
200 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
203 /* Bitmaps indicating relevant packet types for a particular protocol header
205 * Packet types for packets with an Outer/First/Single MAC header
207 static const u32 ice_ptypes_mac_ofos[] = {
208 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
209 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
210 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000707,
211 0x00000000, 0x00000000, 0x00000000, 0x00000000,
212 0x00000000, 0x00000000, 0x00000000, 0x00000000,
213 0x00000000, 0x00000000, 0x00000000, 0x00000000,
214 0x00000000, 0x00000000, 0x00000000, 0x00000000,
215 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 /* Packet types for packets with an Innermost/Last MAC VLAN header */
219 static const u32 ice_ptypes_macvlan_il[] = {
220 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
221 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
222 0x00000000, 0x00000000, 0x00000000, 0x00000000,
223 0x00000000, 0x00000000, 0x00000000, 0x00000000,
224 0x00000000, 0x00000000, 0x00000000, 0x00000000,
225 0x00000000, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
231 * include IPV4 other PTYPEs
233 static const u32 ice_ptypes_ipv4_ofos[] = {
234 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
235 0x00000000, 0x00000155, 0x00000000, 0x00000000,
236 0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x00000000, 0x00000000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
247 static const u32 ice_ptypes_ipv4_ofos_all[] = {
248 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
249 0x00000000, 0x00000155, 0x00000000, 0x00000000,
250 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 /* Packet types for packets with an Innermost/Last IPv4 header */
259 static const u32 ice_ptypes_ipv4_il[] = {
260 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
261 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
262 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
263 0x00000000, 0x00000000, 0x00000000, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
271 * include IPV6 other PTYPEs
273 static const u32 ice_ptypes_ipv6_ofos[] = {
274 0x00000000, 0x00000000, 0x77000000, 0x10002000,
275 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
276 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
287 static const u32 ice_ptypes_ipv6_ofos_all[] = {
288 0x00000000, 0x00000000, 0x77000000, 0x10002000,
289 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
290 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 /* Packet types for packets with an Innermost/Last IPv6 header */
299 static const u32 ice_ptypes_ipv6_il[] = {
300 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
301 0x00000770, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
311 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
312 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
313 0x00000000, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
323 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
324 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
325 0x00000008, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00139800, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
335 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
336 0x00000000, 0x00000000, 0x43000000, 0x10002000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x02300000, 0x00000540, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
347 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
348 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
349 0x00000430, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 /* Packet types for packets with an Outermost/First ARP header */
359 static const u32 ice_ptypes_arp_of[] = {
360 0x00000800, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000000, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 0x00000000, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 /* UDP Packet types for non-tunneled packets or tunneled
371 * packets with inner UDP.
373 static const u32 ice_ptypes_udp_il[] = {
374 0x81000000, 0x20204040, 0x04000010, 0x80810102,
375 0x00000040, 0x00000000, 0x00000000, 0x00000000,
376 0x00000000, 0x00410000, 0x908427E0, 0x00000007,
377 0x00000000, 0x00000000, 0x00000000, 0x00000000,
378 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 0x00000000, 0x00000000, 0x00000000, 0x00000000,
380 0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 /* Packet types for packets with an Innermost/Last TCP header */
385 static const u32 ice_ptypes_tcp_il[] = {
386 0x04000000, 0x80810102, 0x10000040, 0x02040408,
387 0x00000102, 0x00000000, 0x00000000, 0x00000000,
388 0x00000000, 0x00820000, 0x21084000, 0x00000000,
389 0x00000000, 0x00000000, 0x00000000, 0x00000000,
390 0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 0x00000000, 0x00000000, 0x00000000, 0x00000000,
392 0x00000000, 0x00000000, 0x00000000, 0x00000000,
393 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 /* Packet types for packets with an Innermost/Last SCTP header */
397 static const u32 ice_ptypes_sctp_il[] = {
398 0x08000000, 0x01020204, 0x20000081, 0x04080810,
399 0x00000204, 0x00000000, 0x00000000, 0x00000000,
400 0x00000000, 0x01040000, 0x00000000, 0x00000000,
401 0x00000000, 0x00000000, 0x00000000, 0x00000000,
402 0x00000000, 0x00000000, 0x00000000, 0x00000000,
403 0x00000000, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x00000000, 0x00000000, 0x00000000,
405 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 /* Packet types for packets with an Outermost/First ICMP header */
409 static const u32 ice_ptypes_icmp_of[] = {
410 0x10000000, 0x00000000, 0x00000000, 0x00000000,
411 0x00000000, 0x00000000, 0x00000000, 0x00000000,
412 0x00000000, 0x00000000, 0x00000000, 0x00000000,
413 0x00000000, 0x00000000, 0x00000000, 0x00000000,
414 0x00000000, 0x00000000, 0x00000000, 0x00000000,
415 0x00000000, 0x00000000, 0x00000000, 0x00000000,
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 /* Packet types for packets with an Innermost/Last ICMP header */
421 static const u32 ice_ptypes_icmp_il[] = {
422 0x00000000, 0x02040408, 0x40000102, 0x08101020,
423 0x00000408, 0x00000000, 0x00000000, 0x00000000,
424 0x00000000, 0x00000000, 0x42108000, 0x00000000,
425 0x00000000, 0x00000000, 0x00000000, 0x00000000,
426 0x00000000, 0x00000000, 0x00000000, 0x00000000,
427 0x00000000, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 /* Packet types for packets with an Outermost/First GRE header */
433 static const u32 ice_ptypes_gre_of[] = {
434 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
435 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
436 0x00000000, 0x00000000, 0x00000000, 0x00000000,
437 0x00000000, 0x00000000, 0x00000000, 0x00000000,
438 0x00000000, 0x00000000, 0x00000000, 0x00000000,
439 0x00000000, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 /* Packet types for packets with an Innermost/Last MAC header */
445 static const u32 ice_ptypes_mac_il[] = {
446 0x00000000, 0x20000000, 0x00000000, 0x00000000,
447 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 0x00000000, 0x00000000, 0x00000000, 0x00000000,
449 0x00000000, 0x00000000, 0x00000000, 0x00000000,
450 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 0x00000000, 0x00000000, 0x00000000, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 /* Packet types for GTPC */
457 static const u32 ice_ptypes_gtpc[] = {
458 0x00000000, 0x00000000, 0x00000000, 0x00000000,
459 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
461 0x00000000, 0x00000000, 0x00000000, 0x00000000,
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 /* Packet types for VXLAN with VNI */
469 static const u32 ice_ptypes_vxlan_vni[] = {
470 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
471 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
472 0x00000000, 0x00000000, 0x00000000, 0x00000000,
473 0x00000000, 0x00000000, 0x00000000, 0x00000000,
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x00000000, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 /* Packet types for GTPC with TEID */
481 static const u32 ice_ptypes_gtpc_tid[] = {
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 0x00000000, 0x00000000, 0x00000060, 0x00000000,
485 0x00000000, 0x00000000, 0x00000000, 0x00000000,
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 /* Packet types for GTPU */
493 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
494 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
495 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
496 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
497 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
498 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
499 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
500 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
501 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
502 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
503 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
504 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
505 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
506 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
507 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
508 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
509 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
510 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
511 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
512 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
513 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
516 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
517 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
518 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
519 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
520 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
521 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
522 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
523 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
524 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
525 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
526 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
527 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
528 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
529 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
530 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
531 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
532 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
533 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
534 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
535 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
536 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
539 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
540 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
541 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
542 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
543 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
544 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
545 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
546 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
547 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
548 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
549 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
550 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
551 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
552 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
553 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
554 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
555 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
556 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
557 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
558 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
559 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
562 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
563 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
564 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
565 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
566 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
567 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
568 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
569 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
570 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
571 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
572 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
573 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
574 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
575 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
576 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
577 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
578 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
579 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
580 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
581 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
582 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
585 static const u32 ice_ptypes_gtpu[] = {
586 0x00000000, 0x00000000, 0x00000000, 0x00000000,
587 0x00000000, 0x00000000, 0x00000000, 0x00000000,
588 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
589 0x00000000, 0x00000000, 0x00000000, 0x00000000,
590 0x00000000, 0x00000000, 0x00000000, 0x00000000,
591 0x00000000, 0x00000000, 0x00000000, 0x00000000,
592 0x00000000, 0x00000000, 0x00000000, 0x00000000,
593 0x00000000, 0x00000000, 0x00000000, 0x00000000,
596 /* Packet types for pppoe */
597 static const u32 ice_ptypes_pppoe[] = {
598 0x00000000, 0x00000000, 0x00000000, 0x00000000,
599 0x00000000, 0x00000000, 0x00000000, 0x00000000,
600 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
601 0x00000000, 0x00000000, 0x00000000, 0x00000000,
602 0x00000000, 0x00000000, 0x00000000, 0x00000000,
603 0x00000000, 0x00000000, 0x00000000, 0x00000000,
604 0x00000000, 0x00000000, 0x00000000, 0x00000000,
605 0x00000000, 0x00000000, 0x00000000, 0x00000000,
608 /* Packet types for packets with PFCP NODE header */
609 static const u32 ice_ptypes_pfcp_node[] = {
610 0x00000000, 0x00000000, 0x00000000, 0x00000000,
611 0x00000000, 0x00000000, 0x00000000, 0x00000000,
612 0x00000000, 0x00000000, 0x80000000, 0x00000002,
613 0x00000000, 0x00000000, 0x00000000, 0x00000000,
614 0x00000000, 0x00000000, 0x00000000, 0x00000000,
615 0x00000000, 0x00000000, 0x00000000, 0x00000000,
616 0x00000000, 0x00000000, 0x00000000, 0x00000000,
617 0x00000000, 0x00000000, 0x00000000, 0x00000000,
620 /* Packet types for packets with PFCP SESSION header */
621 static const u32 ice_ptypes_pfcp_session[] = {
622 0x00000000, 0x00000000, 0x00000000, 0x00000000,
623 0x00000000, 0x00000000, 0x00000000, 0x00000000,
624 0x00000000, 0x00000000, 0x00000000, 0x00000005,
625 0x00000000, 0x00000000, 0x00000000, 0x00000000,
626 0x00000000, 0x00000000, 0x00000000, 0x00000000,
627 0x00000000, 0x00000000, 0x00000000, 0x00000000,
628 0x00000000, 0x00000000, 0x00000000, 0x00000000,
629 0x00000000, 0x00000000, 0x00000000, 0x00000000,
632 /* Packet types for l2tpv3 */
633 static const u32 ice_ptypes_l2tpv3[] = {
634 0x00000000, 0x00000000, 0x00000000, 0x00000000,
635 0x00000000, 0x00000000, 0x00000000, 0x00000000,
636 0x00000000, 0x00000000, 0x00000000, 0x00000300,
637 0x00000000, 0x00000000, 0x00000000, 0x00000000,
638 0x00000000, 0x00000000, 0x00000000, 0x00000000,
639 0x00000000, 0x00000000, 0x00000000, 0x00000000,
640 0x00000000, 0x00000000, 0x00000000, 0x00000000,
641 0x00000000, 0x00000000, 0x00000000, 0x00000000,
644 /* Packet types for esp */
645 static const u32 ice_ptypes_esp[] = {
646 0x00000000, 0x00000000, 0x00000000, 0x00000000,
647 0x00000000, 0x00000003, 0x00000000, 0x00000000,
648 0x00000000, 0x00000000, 0x00000000, 0x00000000,
649 0x00000000, 0x00000000, 0x00000000, 0x00000000,
650 0x00000000, 0x00000000, 0x00000000, 0x00000000,
651 0x00000000, 0x00000000, 0x00000000, 0x00000000,
652 0x00000000, 0x00000000, 0x00000000, 0x00000000,
653 0x00000000, 0x00000000, 0x00000000, 0x00000000,
656 /* Packet types for ah */
657 static const u32 ice_ptypes_ah[] = {
658 0x00000000, 0x00000000, 0x00000000, 0x00000000,
659 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
660 0x00000000, 0x00000000, 0x00000000, 0x00000000,
661 0x00000000, 0x00000000, 0x00000000, 0x00000000,
662 0x00000000, 0x00000000, 0x00000000, 0x00000000,
663 0x00000000, 0x00000000, 0x00000000, 0x00000000,
664 0x00000000, 0x00000000, 0x00000000, 0x00000000,
665 0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 /* Packet types for packets with NAT_T ESP header */
669 static const u32 ice_ptypes_nat_t_esp[] = {
670 0x00000000, 0x00000000, 0x00000000, 0x00000000,
671 0x00000000, 0x00000030, 0x00000000, 0x00000000,
672 0x00000000, 0x00000000, 0x00000000, 0x00000000,
673 0x00000000, 0x00000000, 0x00000000, 0x00000000,
674 0x00000000, 0x00000000, 0x00000000, 0x00000000,
675 0x00000000, 0x00000000, 0x00000000, 0x00000000,
676 0x00000000, 0x00000000, 0x00000000, 0x00000000,
677 0x00000000, 0x00000000, 0x00000000, 0x00000000,
680 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
681 0x00000846, 0x00000000, 0x00000000, 0x00000000,
682 0x00000000, 0x00000000, 0x00000000, 0x00000000,
683 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
684 0x00000000, 0x00000000, 0x00000000, 0x00000000,
685 0x00000000, 0x00000000, 0x00000000, 0x00000000,
686 0x00000000, 0x00000000, 0x00000000, 0x00000000,
687 0x00000000, 0x00000000, 0x00000000, 0x00000000,
688 0x00000000, 0x00000000, 0x00000000, 0x00000000,
691 static const u32 ice_ptypes_gtpu_no_ip[] = {
692 0x00000000, 0x00000000, 0x00000000, 0x00000000,
693 0x00000000, 0x00000000, 0x00000000, 0x00000000,
694 0x00000000, 0x00000000, 0x00000600, 0x00000000,
695 0x00000000, 0x00000000, 0x00000000, 0x00000000,
696 0x00000000, 0x00000000, 0x00000000, 0x00000000,
697 0x00000000, 0x00000000, 0x00000000, 0x00000000,
698 0x00000000, 0x00000000, 0x00000000, 0x00000000,
699 0x00000000, 0x00000000, 0x00000000, 0x00000000,
702 static const u32 ice_ptypes_ecpri_tp0[] = {
703 0x00000000, 0x00000000, 0x00000000, 0x00000000,
704 0x00000000, 0x00000000, 0x00000000, 0x00000000,
705 0x00000000, 0x00000000, 0x00000000, 0x00000400,
706 0x00000000, 0x00000000, 0x00000000, 0x00000000,
707 0x00000000, 0x00000000, 0x00000000, 0x00000000,
708 0x00000000, 0x00000000, 0x00000000, 0x00000000,
709 0x00000000, 0x00000000, 0x00000000, 0x00000000,
710 0x00000000, 0x00000000, 0x00000000, 0x00000000,
713 /* Manage parameters and info. used during the creation of a flow profile */
714 struct ice_flow_prof_params {
716 u16 entry_length; /* # of bytes formatted entry will require */
718 struct ice_flow_prof *prof;
720 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
721 * This will give us the direction flags.
723 struct ice_fv_word es[ICE_MAX_FV_WORDS];
724 /* attributes can be used to add attributes to a particular PTYPE */
725 const struct ice_ptype_attributes *attr;
728 u16 mask[ICE_MAX_FV_WORDS];
729 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Tunnel/encapsulation segment-header flags grouped together; presumably
 * the set of headers considered "inner" for RSS purposes — confirm against
 * the callers that test this mask.
 */
732 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
733 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
734 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
735 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
736 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
737 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
738 ICE_FLOW_SEG_HDR_ECPRI_TP0)
/* All L2 segment-header flags */
740 #define ICE_FLOW_SEG_HDRS_L2_MASK \
741 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 segment-header flags; at most one may be set per segment
 * (see the ice_is_pow2() check in ice_flow_val_hdrs)
 */
742 #define ICE_FLOW_SEG_HDRS_L3_MASK \
743 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
744 ICE_FLOW_SEG_HDR_ARP)
/* All L4 segment-header flags; at most one may be set per segment */
745 #define ICE_FLOW_SEG_HDRS_L4_MASK \
746 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
747 ICE_FLOW_SEG_HDR_SCTP)
748 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
749 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
750 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
753 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
754 * @segs: array of one or more packet segments that describe the flow
755 * @segs_cnt: number of packet segments provided
 *
 * Returns ICE_ERR_PARAM when any segment requests more than one L3 or more
 * than one L4 protocol header; ice_is_pow2() on the masked bits rejects
 * masks with multiple bits set.
757 static enum ice_status
758 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
762 for (i = 0; i < segs_cnt; i++) {
763 /* Multiple L3 headers */
764 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
765 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
766 return ICE_ERR_PARAM;
768 /* Multiple L4 headers */
769 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
770 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
771 return ICE_ERR_PARAM;
777 /* Sizes of fixed known protocol headers without header options */
/* All sizes below are in bytes; used by ice_flow_calc_seg_sz() */
778 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
779 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
780 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
781 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
782 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
783 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
784 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
785 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
786 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
789 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
790 * @params: information about the flow to be processed
791 * @seg: index of packet segment whose header size is to be determined
 *
 * Sums the fixed header sizes (ICE_FLOW_PROT_HDR_SZ_*) of the L2/L3/L4
 * headers the segment specifies and returns the total in bytes.
793 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2: MAC, optionally with a single VLAN tag (+2 bytes) */
798 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
799 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3: at most one of IPv4 / IPv6 / ARP per segment */
802 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
803 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
804 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
805 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
806 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
807 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
808 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
809 /* A L3 header is required if L4 is specified */
/* L4: at most one of ICMP / TCP / UDP / SCTP per segment */
813 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
814 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
815 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
816 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
817 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
818 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
819 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
820 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
826 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
827 * @params: information about the flow to be processed
829 * This function identifies the packet types associated with the protocol
830 * headers being present in packet segments of the specified flow profile.
 *
 * Starts from an all-ones PTYPE bitmap and ANDs it with the PTYPE table of
 * every requested protocol header. Segment 0 selects the outer (OFOS/OF)
 * tables; later segments select the inner (IL) tables.
832 static enum ice_status
833 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
835 struct ice_flow_prof *prof;
/* start with all PTYPEs allowed, then narrow per header below */
838 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
843 for (i = 0; i < params->prof->segs_cnt; i++) {
844 const ice_bitmap_t *src;
847 hdrs = prof->segs[i].hdrs;
849 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
850 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
851 (const ice_bitmap_t *)ice_ptypes_mac_il;
852 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* VLAN PTYPEs only exist for inner (non-first) segments */
856 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
857 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
858 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* ARP is only matched on the outermost segment */
862 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
863 ice_and_bitmap(params->ptypes, params->ptypes,
864 (const ice_bitmap_t *)ice_ptypes_arp_of,
868 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
869 src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
870 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* L3 selection: "_OTHER" variants first, then no-L4, then generic */
873 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
874 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
876 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
877 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
878 ice_and_bitmap(params->ptypes, params->ptypes, src,
880 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
881 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
883 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
884 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
885 ice_and_bitmap(params->ptypes, params->ptypes, src,
887 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
888 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
889 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
890 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
891 ice_and_bitmap(params->ptypes, params->ptypes, src,
893 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
894 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
895 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
896 ice_and_bitmap(params->ptypes, params->ptypes, src,
898 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
899 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
900 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
901 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
902 ice_and_bitmap(params->ptypes, params->ptypes, src,
904 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
905 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
906 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
907 ice_and_bitmap(params->ptypes, params->ptypes, src,
911 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
912 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
913 ice_and_bitmap(params->ptypes, params->ptypes,
914 src, ICE_FLOW_PTYPE_MAX);
915 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
916 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
917 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* when PPPoE was not requested, mask its PTYPEs out */
920 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
921 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
925 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
926 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
927 ice_and_bitmap(params->ptypes, params->ptypes, src,
929 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
930 ice_and_bitmap(params->ptypes, params->ptypes,
931 (const ice_bitmap_t *)ice_ptypes_tcp_il,
933 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
934 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
935 ice_and_bitmap(params->ptypes, params->ptypes, src,
939 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
940 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
941 (const ice_bitmap_t *)ice_ptypes_icmp_il;
942 ice_and_bitmap(params->ptypes, params->ptypes, src,
944 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
946 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
947 ice_and_bitmap(params->ptypes, params->ptypes,
948 src, ICE_FLOW_PTYPE_MAX);
950 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
951 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
952 ice_and_bitmap(params->ptypes, params->ptypes,
953 src, ICE_FLOW_PTYPE_MAX);
954 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
955 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
956 ice_and_bitmap(params->ptypes, params->ptypes,
957 src, ICE_FLOW_PTYPE_MAX);
958 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
959 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
960 ice_and_bitmap(params->ptypes, params->ptypes,
961 src, ICE_FLOW_PTYPE_MAX);
962 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
963 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
964 ice_and_bitmap(params->ptypes, params->ptypes,
965 src, ICE_FLOW_PTYPE_MAX);
967 /* Attributes for GTP packet with downlink */
968 params->attr = ice_attr_gtpu_down;
969 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
970 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
971 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
972 ice_and_bitmap(params->ptypes, params->ptypes,
973 src, ICE_FLOW_PTYPE_MAX);
975 /* Attributes for GTP packet with uplink */
976 params->attr = ice_attr_gtpu_up;
977 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
978 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
979 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
980 ice_and_bitmap(params->ptypes, params->ptypes,
981 src, ICE_FLOW_PTYPE_MAX);
983 /* Attributes for GTP packet with Extension Header */
984 params->attr = ice_attr_gtpu_eh;
985 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
986 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
987 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
988 ice_and_bitmap(params->ptypes, params->ptypes,
989 src, ICE_FLOW_PTYPE_MAX);
991 /* Attributes for GTP packet without Extension Header */
992 params->attr = ice_attr_gtpu_session;
993 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
994 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
995 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
996 ice_and_bitmap(params->ptypes, params->ptypes,
997 src, ICE_FLOW_PTYPE_MAX);
998 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
999 src = (const ice_bitmap_t *)ice_ptypes_esp;
1000 ice_and_bitmap(params->ptypes, params->ptypes,
1001 src, ICE_FLOW_PTYPE_MAX);
1002 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1003 src = (const ice_bitmap_t *)ice_ptypes_ah;
1004 ice_and_bitmap(params->ptypes, params->ptypes,
1005 src, ICE_FLOW_PTYPE_MAX);
1006 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1007 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1008 ice_and_bitmap(params->ptypes, params->ptypes,
1009 src, ICE_FLOW_PTYPE_MAX);
1010 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1011 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1012 ice_and_bitmap(params->ptypes, params->ptypes,
1013 src, ICE_FLOW_PTYPE_MAX);
/* PFCP: keep node or session PTYPEs if requested, else mask both out */
1016 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1017 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1019 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1022 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1024 ice_and_bitmap(params->ptypes, params->ptypes,
1025 src, ICE_FLOW_PTYPE_MAX);
1027 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1028 ice_andnot_bitmap(params->ptypes, params->ptypes,
1029 src, ICE_FLOW_PTYPE_MAX);
1031 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1032 ice_andnot_bitmap(params->ptypes, params->ptypes,
1033 src, ICE_FLOW_PTYPE_MAX);
1041 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1042 * @hw: pointer to the HW struct
1043 * @params: information about the flow to be processed
1044 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1046 * This function will allocate an extraction sequence entries for a DWORD size
1047 * chunk of the packet flags.
 *
 * Returns ICE_ERR_MAX_LIMIT when the extraction sequence already uses all
 * field-vector words of the block.
1049 static enum ice_status
1050 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1051 struct ice_flow_prof_params *params,
1052 enum ice_flex_mdid_pkt_flags flags)
1054 u8 fv_words = hw->blk[params->blk].es.fvw;
1057 /* Make sure the number of extraction sequence entries required does not
1058 * exceed the block's capacity.
1060 if (params->es_cnt >= fv_words)
1061 return ICE_ERR_MAX_LIMIT;
1063 /* some blocks require a reversed field vector layout */
1064 if (hw->blk[params->blk].es.reverse)
1065 idx = fv_words - params->es_cnt - 1;
1067 idx = params->es_cnt;
/* metadata protocol ID; the flags value doubles as the extract offset */
1069 params->es[idx].prot_id = ICE_PROT_META_ID;
1070 params->es[idx].off = flags;
1077 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1078 * @hw: pointer to the HW struct
1079 * @params: information about the flow to be processed
1080 * @seg: packet segment index of the field to be extracted
1081 * @fld: ID of field to be extracted
1082 * @match: bitfield of all fields
1084 * This function determines the protocol ID, offset, and size of the given
1085 * field. It then allocates one or more extraction sequence entries for the
1086 * given field, and fill the entries with protocol ID and offset information.
 *
 * Returns ICE_ERR_NOT_IMPL for fields with no protocol ID mapping, or
 * ICE_ERR_MAX_LIMIT when the extraction sequence would exceed the block's
 * field-vector capacity.
1088 static enum ice_status
1089 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1090 u8 seg, enum ice_flow_field fld, u64 match)
1092 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1093 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1094 u8 fv_words = hw->blk[params->blk].es.fvw;
1095 struct ice_flow_fld_info *flds;
1096 u16 cnt, ese_bits, i;
1101 flds = params->prof->segs[seg].fields;
/* Map the field to its HW protocol ID; seg 0 picks the outer (OF/OF_OR_S)
 * protocol, later segments the inner (IL) protocol. Some fields also pick
 * a sibling that shares the same extraction word.
 */
1104 case ICE_FLOW_FIELD_IDX_ETH_DA:
1105 case ICE_FLOW_FIELD_IDX_ETH_SA:
1106 case ICE_FLOW_FIELD_IDX_S_VLAN:
1107 case ICE_FLOW_FIELD_IDX_C_VLAN:
1108 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1110 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1111 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1113 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1114 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1116 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1117 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1119 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1120 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1121 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1123 /* TTL and PROT share the same extraction seq. entry.
1124 * Each is considered a sibling to the other in terms of sharing
1125 * the same extraction sequence entry.
1127 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1128 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1130 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1132 /* If the sibling field is also included, that field's
1133 * mask needs to be included.
1135 if (match & BIT(sib))
1136 sib_mask = ice_flds_info[sib].mask;
1138 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1139 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1140 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1142 /* TTL and PROT share the same extraction seq. entry.
1143 * Each is considered a sibling to the other in terms of sharing
1144 * the same extraction sequence entry.
1146 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1147 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1149 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1151 /* If the sibling field is also included, that field's
1152 * mask needs to be included.
1154 if (match & BIT(sib))
1155 sib_mask = ice_flds_info[sib].mask;
1157 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1158 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1159 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1161 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1162 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1163 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1164 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1165 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1166 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1167 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1168 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1169 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1171 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1172 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1173 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1174 prot_id = ICE_PROT_TCP_IL;
1176 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1177 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1178 prot_id = ICE_PROT_UDP_IL_OR_S;
1180 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1181 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1182 prot_id = ICE_PROT_SCTP_IL;
1184 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1185 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1186 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1187 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1188 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1189 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1190 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1191 /* GTP is accessed through UDP OF protocol */
1192 prot_id = ICE_PROT_UDP_OF;
1194 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1195 prot_id = ICE_PROT_PPPOE;
1197 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1198 prot_id = ICE_PROT_UDP_IL_OR_S;
1200 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1201 prot_id = ICE_PROT_L2TPV3;
1203 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1204 prot_id = ICE_PROT_ESP_F;
1206 case ICE_FLOW_FIELD_IDX_AH_SPI:
1207 prot_id = ICE_PROT_ESP_2;
1209 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1210 prot_id = ICE_PROT_UDP_IL_OR_S;
1212 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1213 prot_id = ICE_PROT_ECPRI;
1215 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1216 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1217 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1218 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1219 case ICE_FLOW_FIELD_IDX_ARP_OP:
1220 prot_id = ICE_PROT_ARP_OF;
1222 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1223 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1224 /* ICMP type and code share the same extraction seq. entry */
1225 prot_id = (params->prof->segs[seg].hdrs &
1226 ICE_FLOW_SEG_HDR_IPV4) ?
1227 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1228 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1229 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1230 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1232 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1233 prot_id = ICE_PROT_GRE_OF;
1236 return ICE_ERR_NOT_IMPL;
1239 /* Each extraction sequence entry is a word in size, and extracts a
1240 * word-aligned offset from a protocol header.
1242 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* record word-aligned offset, in-word displacement and mask for the field */
1244 flds[fld].xtrct.prot_id = prot_id;
1245 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1246 ICE_FLOW_FV_EXTRACT_SZ;
1247 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1248 flds[fld].xtrct.idx = params->es_cnt;
1249 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1251 /* Adjust the next field-entry index after accommodating the number of
1252 * entries this field consumes
1254 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1255 ice_flds_info[fld].size, ese_bits);
1257 /* Fill in the extraction sequence entries needed for this field */
1258 off = flds[fld].xtrct.off;
1259 mask = flds[fld].xtrct.mask;
1260 for (i = 0; i < cnt; i++) {
1261 /* Only consume an extraction sequence entry if there is no
1262 * sibling field associated with this field or the sibling entry
1263 * already extracts the word shared with this field.
1265 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1266 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1267 flds[sib].xtrct.off != off) {
1270 /* Make sure the number of extraction sequence required
1271 * does not exceed the block's capability
1273 if (params->es_cnt >= fv_words)
1274 return ICE_ERR_MAX_LIMIT;
1276 /* some blocks require a reversed field vector layout */
1277 if (hw->blk[params->blk].es.reverse)
1278 idx = fv_words - params->es_cnt - 1;
1280 idx = params->es_cnt;
1282 params->es[idx].prot_id = prot_id;
1283 params->es[idx].off = off;
1284 params->mask[idx] = mask | sib_mask;
1288 off += ICE_FLOW_FV_EXTRACT_SZ;
1295 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1296 * @hw: pointer to the HW struct
1297 * @params: information about the flow to be processed
1298 * @seg: index of packet segment whose raw fields are to be extracted
1300 static enum ice_status
1301 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1308 if (!params->prof->segs[seg].raws_cnt)
1311 if (params->prof->segs[seg].raws_cnt >
1312 ARRAY_SIZE(params->prof->segs[seg].raws))
1313 return ICE_ERR_MAX_LIMIT;
1315 /* Offsets within the segment headers are not supported */
1316 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1318 return ICE_ERR_PARAM;
1320 fv_words = hw->blk[params->blk].es.fvw;
1322 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1323 struct ice_flow_seg_fld_raw *raw;
1326 raw = ¶ms->prof->segs[seg].raws[i];
1328 /* Storing extraction information */
1329 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1330 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1331 ICE_FLOW_FV_EXTRACT_SZ;
1332 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1334 raw->info.xtrct.idx = params->es_cnt;
1336 /* Determine the number of field vector entries this raw field
1339 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1340 (raw->info.src.last * BITS_PER_BYTE),
1341 (ICE_FLOW_FV_EXTRACT_SZ *
1343 off = raw->info.xtrct.off;
1344 for (j = 0; j < cnt; j++) {
1347 /* Make sure the number of extraction sequence required
1348 * does not exceed the block's capability
1350 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1351 params->es_cnt >= ICE_MAX_FV_WORDS)
1352 return ICE_ERR_MAX_LIMIT;
1354 /* some blocks require a reversed field vector layout */
1355 if (hw->blk[params->blk].es.reverse)
1356 idx = fv_words - params->es_cnt - 1;
1358 idx = params->es_cnt;
1360 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1361 params->es[idx].off = off;
1363 off += ICE_FLOW_FV_EXTRACT_SZ;
1371 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1372 * @hw: pointer to the HW struct
1373 * @params: information about the flow to be processed
1375 * This function iterates through all matched fields in the given segments, and
1376 * creates an extraction sequence for the fields.
1378 static enum ice_status
1379 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1380 struct ice_flow_prof_params *params)
1382 enum ice_status status = ICE_SUCCESS;
1385 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1388 if (params->blk == ICE_BLK_ACL) {
1389 status = ice_flow_xtract_pkt_flags(hw, params,
1390 ICE_RX_MDID_PKT_FLAGS_15_0);
1395 for (i = 0; i < params->prof->segs_cnt; i++) {
1396 u64 match = params->prof->segs[i].match;
1397 enum ice_flow_field j;
/* walk each matched field bit; clear it once its entries are built */
1399 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1400 ICE_FLOW_FIELD_IDX_MAX) {
1401 status = ice_flow_xtract_fld(hw, params, i, j, match);
1404 ice_clear_bit(j, (ice_bitmap_t *)&match);
1407 /* Process raw matching bytes */
1408 status = ice_flow_xtract_raws(hw, params, i);
1417 * ice_flow_sel_acl_scen - returns the specific scenario
1418 * @hw: pointer to the hardware structure
1419 * @params: information about the flow to be processed
1421 * This function will return the specific scenario based on the
1422 * params passed to it
 *
 * Selects the narrowest scenario whose effective width still fits
 * params->entry_length; ICE_ERR_DOES_NOT_EXIST if none qualifies.
1424 static enum ice_status
1425 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1427 /* Find the best-fit scenario for the provided match width */
1428 struct ice_acl_scen *cand_scen = NULL, *scen;
1431 return ICE_ERR_DOES_NOT_EXIST;
1433 /* Loop through each scenario and match against the scenario width
1434 * to select the specific scenario
1436 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1437 if (scen->eff_width >= params->entry_length &&
1438 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1441 return ICE_ERR_DOES_NOT_EXIST;
1443 params->prof->cfg.scen = cand_scen;
1449 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1450 * @params: information about the flow to be processed
 *
 * Lays out matched fields and raw fields byte-by-byte starting at the byte
 * selection base index, assigning range-checker slots to range-type fields.
 * Returns ICE_ERR_PARAM when the entry exceeds the byte-selection or
 * range-checker capacity, or when a range field is malformed.
1452 static enum ice_status
1453 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1455 u16 index, i, range_idx = 0;
1457 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1459 for (i = 0; i < params->prof->segs_cnt; i++) {
1460 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1463 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1464 ICE_FLOW_FIELD_IDX_MAX) {
1465 struct ice_flow_fld_info *fld = &seg->fields[j];
1467 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1469 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1470 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1472 /* Range checking only supported for single
1475 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1477 BITS_PER_BYTE * 2) > 1)
1478 return ICE_ERR_PARAM;
1480 /* Ranges must define low and high values */
1481 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1482 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1483 return ICE_ERR_PARAM;
1485 fld->entry.val = range_idx++;
1487 /* Store adjusted byte-length of field for later
1488 * use, taking into account potential
1489 * non-byte-aligned displacement
1491 fld->entry.last = DIVIDE_AND_ROUND_UP
1492 (ice_flds_info[j].size +
1493 (fld->xtrct.disp % BITS_PER_BYTE),
1495 fld->entry.val = index;
1496 index += fld->entry.last;
1500 for (j = 0; j < seg->raws_cnt; j++) {
1501 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1503 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1504 raw->info.entry.val = index;
1505 raw->info.entry.last = raw->info.src.last;
1506 index += raw->info.entry.last;
1510 /* Currently only support using the byte selection base, which only
1511 * allows for an effective entry size of 30 bytes. Reject anything
1514 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1515 return ICE_ERR_PARAM;
1517 /* Only 8 range checkers per profile, reject anything trying to use
1520 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1521 return ICE_ERR_PARAM;
1523 /* Store # bytes required for entry for later use */
1524 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1530 * ice_flow_proc_segs - process all packet segments associated with a profile
1531 * @hw: pointer to the HW struct
1532 * @params: information about the flow to be processed
 *
 * Resolves PTYPEs, builds the extraction sequence, and — for the ACL block —
 * computes the entry format and selects a scenario. Returns ICE_ERR_NOT_IMPL
 * for unsupported blocks.
1534 static enum ice_status
1535 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1537 enum ice_status status;
1539 status = ice_flow_proc_seg_hdrs(params);
1543 status = ice_flow_create_xtrct_seq(hw, params);
1547 switch (params->blk) {
1550 status = ICE_SUCCESS;
1553 status = ice_flow_acl_def_entry_frmt(params);
1556 status = ice_flow_sel_acl_scen(hw, params);
1561 return ICE_ERR_NOT_IMPL;
/* Condition flags consumed by ice_flow_find_prof_conds() */
1567 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1568 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1569 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1572 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1573 * @hw: pointer to the HW struct
1574 * @blk: classification stage
1575 * @dir: flow direction
1576 * @segs: array of one or more packet segments that describe the flow
1577 * @segs_cnt: number of packet segments provided
1578 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1579 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 *
 * Returns the first matching profile, or NULL if none matches. Takes and
 * releases the per-block profile-list lock around the search.
1581 static struct ice_flow_prof *
1582 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1583 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1584 u8 segs_cnt, u16 vsi_handle, u32 conds)
1586 struct ice_flow_prof *p, *prof = NULL;
1588 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1589 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1590 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1591 segs_cnt && segs_cnt == p->segs_cnt) {
1594 /* Check for profile-VSI association if specified */
1595 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1596 ice_is_vsi_valid(hw, vsi_handle) &&
1597 !ice_is_bit_set(p->vsis, vsi_handle))
1600 /* Protocol headers must be checked. Matched fields are
1601 * checked if specified.
1603 for (i = 0; i < segs_cnt; i++)
1604 if (segs[i].hdrs != p->segs[i].hdrs ||
1605 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1606 segs[i].match != p->segs[i].match))
1609 /* A match is found if all segments are matched */
1610 if (i == segs_cnt) {
1615 ice_release_lock(&hw->fl_profs_locks[blk]);
1621 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1622 * @hw: pointer to the HW struct
1623 * @blk: classification stage
1624 * @dir: flow direction
1625 * @segs: array of one or more packet segments that describe the flow
1626 * @segs_cnt: number of packet segments provided
 *
 * Thin wrapper over ice_flow_find_prof_conds() that also compares matched
 * fields; returns the profile ID or ICE_FLOW_PROF_ID_INVAL when not found.
1629 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1630 struct ice_flow_seg_info *segs, u8 segs_cnt)
1632 struct ice_flow_prof *p;
1634 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1635 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1637 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1641 * ice_flow_find_prof_id - Look up a profile with given profile ID
1642 * @hw: pointer to the HW struct
1643 * @blk: classification stage
1644 * @prof_id: unique ID to identify this flow profile
 *
 * Linear search of the block's profile list; caller is expected to hold the
 * appropriate lock (none is taken here).
1646 static struct ice_flow_prof *
1647 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1649 struct ice_flow_prof *p;
1651 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1652 if (p->id == prof_id)
1659 * ice_dealloc_flow_entry - Deallocate flow entry memory
1660 * @hw: pointer to the HW struct
1661 * @entry: flow entry to be removed
 *
 * Frees the entry's owned buffers (formatted entry, range buffer, actions)
 * and finally the entry itself.
1664 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1670 ice_free(hw, entry->entry);
1672 if (entry->range_buf) {
1673 ice_free(hw, entry->range_buf);
1674 entry->range_buf = NULL;
1678 ice_free(hw, entry->acts);
1680 entry->acts_cnt = 0;
1683 ice_free(hw, entry);
1687 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1688 * @hw: pointer to the HW struct
1689 * @blk: classification stage
1690 * @prof_id: the profile ID handle
1691 * @hw_prof_id: pointer to variable to receive the HW profile ID
 *
 * Returns ICE_SUCCESS and writes *hw_prof_id when the mapping exists,
 * otherwise ICE_ERR_DOES_NOT_EXIST. Serialized by the ES prof_map lock.
1694 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1697 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1698 struct ice_prof_map *map;
1700 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1701 map = ice_search_prof_id(hw, blk, prof_id);
1703 *hw_prof_id = map->prof_id;
1704 status = ICE_SUCCESS;
1706 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* Scenario value (63) marking a PF slot with no configured ACL scenario */
1710 #define ICE_ACL_INVALID_SCEN 0x3f
1713 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1714 * @hw: pointer to the hardware structure
1715 * @prof: pointer to flow profile
1716 * @buf: destination buffer function writes partial extraction sequence to
1718 * returns ICE_SUCCESS if no PF is associated to the given profile
1719 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1720 * returns other error code for real error
1722 static enum ice_status
1723 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1724 struct ice_aqc_acl_prof_generic_frmt *buf)
1726 enum ice_status status;
1729 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1733 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1737 /* If all PF's associated scenarios are all 0 or all
1738 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1739 * not been configured yet.
1741 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1742 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1743 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1744 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1747 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1748 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1749 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1750 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1751 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1752 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1753 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1754 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* any other scenario value means some PF still uses this profile */
1757 return ICE_ERR_IN_USE;
1761 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1762 * @hw: pointer to the hardware structure
1763 * @acts: array of actions to be performed on a match
1764 * @acts_cnt: number of actions
 *
 * Releases the ACL counters referenced by any counter-type actions in the
 * array via ice_aq_dealloc_acl_cntrs().
1766 static enum ice_status
1767 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1772 for (i = 0; i < acts_cnt; i++) {
1773 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1774 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1775 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1776 struct ice_acl_cntrs cntrs;
1777 enum ice_status status;
1779 cntrs.bank = 0; /* Only bank0 for the moment */
1781 LE16_TO_CPU(acts[i].data.acl_act.value);
1783 LE16_TO_CPU(acts[i].data.acl_act.value);
/* dual counter when both packet and byte counts were requested */
1785 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1786 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1788 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1790 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1799 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1800 * @hw: pointer to the hardware structure
1801 * @prof: pointer to flow profile
1803 * Disassociate the scenario from the profile for the PF of the VSI.
 *
 * Reads the profile's generic format, overwrites this PF's scenario slot
 * with ICE_ACL_INVALID_SCEN, and programs the result back.
1805 static enum ice_status
1806 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1808 struct ice_aqc_acl_prof_generic_frmt buf;
1809 enum ice_status status = ICE_SUCCESS;
1812 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1814 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1818 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1822 /* Clear scenario for this PF */
1823 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1824 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1830 * ice_flow_rem_entry_sync - Remove a flow entry
1831 * @hw: pointer to the HW struct
1832 * @blk: classification stage
1833 * @entry: flow entry to be removed
/* Caller is expected to hold the profile's entries_lock (see callers in this
 * file that acquire prof->entries_lock before invoking this) — the "_sync"
 * suffix marks the lock-held variant.
 */
1835 static enum ice_status
1836 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1837 struct ice_flow_entry *entry)
/* Guard clause: NULL entry (condition line lost in extraction) */
1840 return ICE_ERR_BAD_PTR;
1842 if (blk == ICE_BLK_ACL) {
1843 enum ice_status status;
/* Guard clause: missing profile/scenario (condition line lost) */
1846 return ICE_ERR_BAD_PTR;
/* Remove the entry from the HW ACL scenario first */
1848 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1849 entry->scen_entry_idx);
1853 /* Checks if we need to release an ACL counter. */
1854 if (entry->acts_cnt && entry->acts)
1855 ice_flow_acl_free_act_cntr(hw, entry->acts,
/* Unlink from the profile's entry list, then free the SW entry */
1859 LIST_DEL(&entry->l_entry);
1861 ice_dealloc_flow_entry(hw, entry);
1867 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1868 * @hw: pointer to the HW struct
1869 * @blk: classification stage
1870 * @dir: flow direction
1871 * @prof_id: unique ID to identify this flow profile
1872 * @segs: array of one or more packet segments that describe the flow
1873 * @segs_cnt: number of packet segments provided
1874 * @acts: array of default actions
1875 * @acts_cnt: number of default actions
1876 * @prof: stores the returned flow profile added
1878 * Assumption: the caller has acquired the lock to the profile list
1880 static enum ice_status
1881 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1882 enum ice_flow_dir dir, u64 prof_id,
1883 struct ice_flow_seg_info *segs, u8 segs_cnt,
1884 struct ice_flow_action *acts, u8 acts_cnt,
1885 struct ice_flow_prof **prof)
1887 struct ice_flow_prof_params *params;
1888 enum ice_status status;
/* Reject a NULL output pointer, or a nonzero action count with no actions */
1891 if (!prof || (acts_cnt && !acts))
1892 return ICE_ERR_BAD_PTR;
/* params is heap-allocated (large: holds the full extraction sequence) */
1894 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1896 return ICE_ERR_NO_MEMORY;
1898 params->prof = (struct ice_flow_prof *)
1899 ice_malloc(hw, sizeof(*params->prof));
1900 if (!params->prof) {
1901 status = ICE_ERR_NO_MEMORY;
1905 /* initialize extraction sequence to all invalid (0xff) */
1906 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1907 params->es[i].prot_id = ICE_PROT_INVALID;
1908 params->es[i].off = ICE_FV_OFFSET_INVAL;
1912 params->prof->id = prof_id;
1913 params->prof->dir = dir;
1914 params->prof->segs_cnt = segs_cnt;
1916 /* Make a copy of the segments that need to be persistent in the flow
1919 for (i = 0; i < segs_cnt; i++)
/* NOTE(review): "¶ms" below is mojibake for "&params" (HTML entity
 * "&para;" mis-decoded) — restore before this file can compile.
 */
1920 ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
1921 ICE_NONDMA_TO_NONDMA);
1923 /* Make a copy of the actions that need to be persistent in the flow
1927 params->prof->acts = (struct ice_flow_action *)
1928 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1929 ICE_NONDMA_TO_NONDMA);
1931 if (!params->prof->acts) {
1932 status = ICE_ERR_NO_MEMORY;
/* Translate the segments into an extraction sequence / ptype bitmap */
1937 status = ice_flow_proc_segs(hw, params);
1939 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1943 /* Add a HW profile for this flow profile */
1944 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1945 params->attr, params->attr_cnt, params->es,
1948 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
/* Success path: init the entry list/lock and hand the profile back.
 * NOTE(review): "¶ms" below is also "&params" mojibake.
 */
1952 INIT_LIST_HEAD(¶ms->prof->entries);
1953 ice_init_lock(¶ms->prof->entries_lock);
1954 *prof = params->prof;
/* Error-cleanup path (goto labels lost in extraction): free partial prof */
1958 if (params->prof->acts)
1959 ice_free(hw, params->prof->acts);
1960 ice_free(hw, params->prof);
/* params is freed on all paths */
1963 ice_free(hw, params);
1969 * ice_flow_rem_prof_sync - remove a flow profile
1970 * @hw: pointer to the hardware structure
1971 * @blk: classification stage
1972 * @prof: pointer to flow profile to remove
1974 * Assumption: the caller has acquired the lock to the profile list
1976 static enum ice_status
1977 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1978 struct ice_flow_prof *prof)
1980 enum ice_status status;
1982 /* Remove all remaining flow entries before removing the flow profile */
1983 if (!LIST_EMPTY(&prof->entries)) {
1984 struct ice_flow_entry *e, *t;
/* entries_lock protects the per-profile entry list during teardown */
1986 ice_acquire_lock(&prof->entries_lock);
1988 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1990 status = ice_flow_rem_entry_sync(hw, blk, e);
1995 ice_release_lock(&prof->entries_lock);
/* ACL profiles need extra teardown beyond the generic HW profile removal */
1998 if (blk == ICE_BLK_ACL) {
1999 struct ice_aqc_acl_profile_ranges query_rng_buf;
2000 struct ice_aqc_acl_prof_generic_frmt buf;
2003 /* Disassociate the scenario from the profile for the PF */
2004 status = ice_flow_acl_disassoc_scen(hw, prof);
2008 /* Clear the range-checker if the profile ID is no longer
2011 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2012 if (status && status != ICE_ERR_IN_USE) {
2014 } else if (!status) {
2015 /* Clear the range-checker value for profile ID */
2016 ice_memset(&query_rng_buf, 0,
2017 sizeof(struct ice_aqc_acl_profile_ranges),
2020 status = ice_flow_get_hw_prof(hw, blk, prof->id,
/* Program an all-zero range-checker config for the now-unused profile */
2025 status = ice_prog_acl_prof_ranges(hw, prof_id,
2026 &query_rng_buf, NULL);
2032 /* Remove all hardware profiles associated with this flow profile */
2033 status = ice_rem_prof(hw, blk, prof->id);
/* On success: unlink, destroy the entry lock, free actions/profile */
2035 LIST_DEL(&prof->l_entry);
2036 ice_destroy_lock(&prof->entries_lock);
2038 ice_free(hw, prof->acts);
2046 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2047 * @buf: Destination buffer function writes partial xtrct sequence to
2048 * @info: Info about field
2051 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2052 struct ice_flow_fld_info *info)
/* Source byte index = FV word index * word size + byte displacement */
2057 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2058 info->xtrct.disp / BITS_PER_BYTE;
2059 dst = info->entry.val;
2060 for (i = 0; i < info->entry.last; i++)
2061 /* HW stores field vector words in LE, convert words back to BE
2062 * so constructed entries will end up in network order
/* XOR with 1 swaps the two bytes within each 16-bit FV word */
2064 buf->byte_selection[dst++] = src++ ^ 1;
2068 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2069 * @hw: pointer to the hardware structure
2070 * @prof: pointer to flow profile
2072 static enum ice_status
2073 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2075 struct ice_aqc_acl_prof_generic_frmt buf;
2076 struct ice_flow_fld_info *info;
2077 enum ice_status status;
2081 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2083 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* ICE_ERR_IN_USE here means another PF already uses this profile and the
 * profile-wide extraction config is already programmed (see below)
 */
2087 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2088 if (status && status != ICE_ERR_IN_USE)
2092 /* Program the profile dependent configuration. This is done
2093 * only once regardless of the number of PFs using that profile
2095 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2097 for (i = 0; i < prof->segs_cnt; i++) {
2098 struct ice_flow_seg_info *seg = &prof->segs[i];
/* For every matched field: range-checked fields occupy a word-selection
 * slot; everything else goes through the byte-selection helper
 */
2101 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2102 ICE_FLOW_FIELD_IDX_MAX) {
2103 info = &seg->fields[j];
2105 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2106 buf.word_selection[info->entry.val] =
2109 ice_flow_acl_set_xtrct_seq_fld(&buf,
/* Raw (offset-based) matches also contribute to the byte selection */
2113 for (j = 0; j < seg->raws_cnt; j++) {
2114 info = &seg->raws[j].info;
2115 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Mark all PF scenario slots invalid before claiming ours below */
2119 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2120 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2124 /* Update the current PF */
2125 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2126 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2132 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2133 * @hw: pointer to the hardware structure
2134 * @blk: classification stage
2135 * @vsi_handle: software VSI handle
2136 * @vsig: target VSI group
2138 * Assumption: the caller has already verified that the VSI to
2139 * be added has the same characteristics as the VSIG and will
2140 * thereby have access to all resources added to that VSIG.
2143 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
/* Validate handle and block before touching per-block state */
2146 enum ice_status status;
2148 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2149 return ICE_ERR_PARAM;
/* Serialize against other profile-list users for this block */
2151 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2152 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2154 ice_release_lock(&hw->fl_profs_locks[blk]);
2160 * ice_flow_assoc_prof - associate a VSI with a flow profile
2161 * @hw: pointer to the hardware structure
2162 * @blk: classification stage
2163 * @prof: pointer to flow profile
2164 * @vsi_handle: software VSI handle
2166 * Assumption: the caller has acquired the lock to the profile list
2167 * and the software VSI handle has been validated
2170 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2171 struct ice_flow_prof *prof, u16 vsi_handle)
2173 enum ice_status status = ICE_SUCCESS;
/* No-op if the VSI is already associated (bit already set in prof->vsis) */
2175 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2176 if (blk == ICE_BLK_ACL) {
/* ACL profiles must have their extraction sequence programmed
 * before the first VSI association
 */
2177 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2181 status = ice_add_prof_id_flow(hw, blk,
2182 ice_get_hw_vsi_num(hw,
/* Track the association in the profile's VSI bitmap on success */
2186 ice_set_bit(vsi_handle, prof->vsis);
2188 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2196 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2197 * @hw: pointer to the hardware structure
2198 * @blk: classification stage
2199 * @prof: pointer to flow profile
2200 * @vsi_handle: software VSI handle
2202 * Assumption: the caller has acquired the lock to the profile list
2203 * and the software VSI handle has been validated
2205 static enum ice_status
2206 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2207 struct ice_flow_prof *prof, u16 vsi_handle)
2209 enum ice_status status = ICE_SUCCESS;
/* Mirror of ice_flow_assoc_prof: only act if the VSI bit is set */
2211 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2212 status = ice_rem_prof_id_flow(hw, blk,
2213 ice_get_hw_vsi_num(hw,
/* Clear the tracking bit once HW removal succeeded */
2217 ice_clear_bit(vsi_handle, prof->vsis);
2219 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2227 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2228 * @hw: pointer to the HW struct
2229 * @blk: classification stage
2230 * @dir: flow direction
2231 * @prof_id: unique ID to identify this flow profile
2232 * @segs: array of one or more packet segments that describe the flow
2233 * @segs_cnt: number of packet segments provided
2234 * @acts: array of default actions
2235 * @acts_cnt: number of default actions
2236 * @prof: stores the returned flow profile added
/* Public wrapper: validates inputs, then calls ice_flow_add_prof_sync()
 * under the per-block profile-list lock and links the new profile in.
 */
2239 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2240 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2241 struct ice_flow_action *acts, u8 acts_cnt,
2242 struct ice_flow_prof **prof)
2244 enum ice_status status;
2246 if (segs_cnt > ICE_FLOW_SEG_MAX)
2247 return ICE_ERR_MAX_LIMIT;
/* Guard clauses: zero segments / NULL pointers (conditions lost in
 * extraction) — TODO confirm against upstream source
 */
2250 return ICE_ERR_PARAM;
2253 return ICE_ERR_BAD_PTR;
/* Validate the segment headers before taking the lock */
2255 status = ice_flow_val_hdrs(segs, segs_cnt);
2259 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2261 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2262 acts, acts_cnt, prof);
/* On success, insert the new profile at the head of the block's list */
2264 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2266 ice_release_lock(&hw->fl_profs_locks[blk]);
2272 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2273 * @hw: pointer to the HW struct
2274 * @blk: the block for which the flow profile is to be removed
2275 * @prof_id: unique ID of the flow profile to be removed
2278 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2280 struct ice_flow_prof *prof;
2281 enum ice_status status;
/* Lookup and removal happen under the per-block profile-list lock */
2283 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2285 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2287 status = ICE_ERR_DOES_NOT_EXIST;
2291 /* prof becomes invalid after the call */
2292 status = ice_flow_rem_prof_sync(hw, blk, prof);
2295 ice_release_lock(&hw->fl_profs_locks[blk]);
2301 * ice_flow_find_entry - look for a flow entry using its unique ID
2302 * @hw: pointer to the HW struct
2303 * @blk: classification stage
2304 * @entry_id: unique ID to identify this flow entry
2306 * This function looks for the flow entry with the specified unique ID in all
2307 * flow profiles of the specified classification stage. If the entry is found,
2308 * and it returns the handle to the flow entry. Otherwise, it returns
2309 * ICE_FLOW_ENTRY_ID_INVAL.
2311 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2313 struct ice_flow_entry *found = NULL;
2314 struct ice_flow_prof *p;
2316 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Linear scan: every profile in the block, every entry in each profile */
2318 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2319 struct ice_flow_entry *e;
/* Per-profile entry list has its own lock, nested inside the
 * block-level profile-list lock
 */
2321 ice_acquire_lock(&p->entries_lock);
2322 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2323 if (e->id == entry_id) {
2327 ice_release_lock(&p->entries_lock);
2333 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Convert the entry pointer to an opaque handle for the caller */
2335 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2339 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2340 * @hw: pointer to the hardware structure
2341 * @acts: array of actions to be performed on a match
2342 * @acts_cnt: number of actions
2343 * @cnt_alloc: indicates if an ACL counter has been allocated.
2345 static enum ice_status
2346 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2347 u8 acts_cnt, bool *cnt_alloc)
/* dup_check tracks action types already seen, to reject duplicates */
2349 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2352 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2355 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2356 return ICE_ERR_OUT_OF_RANGE;
/* Pass 1: validate supported action types and reject duplicate types */
2358 for (i = 0; i < acts_cnt; i++) {
2359 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2360 acts[i].type != ICE_FLOW_ACT_DROP &&
2361 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2362 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2365 /* If the caller want to add two actions of the same type, then
2366 * it is considered invalid configuration.
2368 if (ice_test_and_set_bit(acts[i].type, dup_check))
2369 return ICE_ERR_PARAM;
2372 /* Checks if ACL counters are needed. */
/* Pass 2: allocate an ACL counter for any counter action and record the
 * assigned counter index back into the action data
 */
2373 for (i = 0; i < acts_cnt; i++) {
2374 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2375 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2376 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2377 struct ice_acl_cntrs cntrs;
2378 enum ice_status status;
2381 cntrs.bank = 0; /* Only bank0 for the moment */
2383 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2384 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2386 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2388 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2391 /* Counter index within the bank */
2392 acts[i].data.acl_act.value =
2393 CPU_TO_LE16(cntrs.first_cntr);
2402 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2403 * @fld: number of the given field
2404 * @info: info about field
2405 * @range_buf: range checker configuration buffer
2406 * @data: pointer to a data buffer containing flow entry's match values/masks
2407 * @range: Input/output param indicating which range checkers are being used
2410 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2411 struct ice_aqc_acl_profile_ranges *range_buf,
2412 u8 *data, u8 *range)
2416 /* If not specified, default mask is all bits in field */
/* NOTE(review): BIT(size) - 1 with size == 16 would shift by the full
 * width; ice_flds_info sizes are in bits per the HEAD table — verify the
 * 16-bit-field case against upstream.
 */
2417 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2418 BIT(ice_flds_info[fld].size) - 1 :
2419 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2421 /* If the mask is 0, then we don't need to worry about this input
2422 * range checker value.
/* Low/high boundaries are read from the entry buffer at src.val/src.last
 * and shifted by the extraction displacement
 */
2426 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2428 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2429 u8 range_idx = info->entry.val;
/* Range-checker registers are big-endian in HW */
2431 range_buf->checker_cfg[range_idx].low_boundary =
2432 CPU_TO_BE16(new_low);
2433 range_buf->checker_cfg[range_idx].high_boundary =
2434 CPU_TO_BE16(new_high);
2435 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2437 /* Indicate which range checker is being used */
2438 *range |= BIT(range_idx);
2443 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2444 * @fld: number of the given field
2445 * @info: info about the field
2446 * @buf: buffer containing the entry
2447 * @dontcare: buffer containing don't care mask for entry
2448 * @data: pointer to a data buffer containing flow entry's match values/masks
2451 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2452 u8 *dontcare, u8 *data)
2454 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2455 bool use_mask = false;
2458 src = info->src.val;
2459 mask = info->src.mask;
/* entry.val is an absolute byte-selection index; rebase it to the
 * start of the key buffer
 */
2460 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2461 disp = info->xtrct.disp % BITS_PER_BYTE;
2463 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying shifted overflow bits across
 * byte boundaries when the field is not byte-aligned (disp != 0)
 */
2466 for (k = 0; k < info->entry.last; k++, dst++) {
2467 /* Add overflow bits from previous byte */
2468 buf[dst] = (tmp_s & 0xff00) >> 8;
2470 /* If mask is not valid, tmp_m is always zero, so just setting
2471 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2472 * overflow bits of mask from prev byte
2474 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2476 /* If there is displacement, last byte will only contain
2477 * displaced data, but there is no more data to read from user
2478 * buffer, so skip so as not to potentially read beyond end of
2481 if (!disp || k < info->entry.last - 1) {
2482 /* Store shifted data to use in next byte */
2483 tmp_s = data[src++] << disp;
2485 /* Add current (shifted) byte */
2486 buf[dst] |= tmp_s & 0xff;
2488 /* Handle mask if valid */
/* dontcare bits are the INVERSE of the user mask (1 = ignore) */
2490 tmp_m = (~data[mask++] & 0xff) << disp;
2491 dontcare[dst] |= tmp_m & 0xff;
2496 /* Fill in don't care bits at beginning of field */
2498 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2499 for (k = 0; k < disp; k++)
2500 dontcare[dst] |= BIT(k);
/* end_disp: bit offset where the field ends inside its last byte */
2503 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2505 /* Fill in don't care bits at end of field */
2507 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2508 info->entry.last - 1;
2509 for (k = end_disp; k < BITS_PER_BYTE; k++)
2510 dontcare[dst] |= BIT(k);
2515 * ice_flow_acl_frmt_entry - Format ACL entry
2516 * @hw: pointer to the hardware structure
2517 * @prof: pointer to flow profile
2518 * @e: pointer to the flow entry
2519 * @data: pointer to a data buffer containing flow entry's match values/masks
2520 * @acts: array of actions to be performed on a match
2521 * @acts_cnt: number of actions
2523 * Formats the key (and key_inverse) to be matched from the data passed in,
2524 * along with data from the flow profile. This key/key_inverse pair makes up
2525 * the 'entry' for an ACL flow entry.
2527 static enum ice_status
2528 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2529 struct ice_flow_entry *e, u8 *data,
2530 struct ice_flow_action *acts, u8 acts_cnt)
2532 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2533 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2534 enum ice_status status;
2539 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2543 /* Format the result action */
/* Validates action list and allocates ACL counters if needed;
 * cnt_alloc records whether a counter must be freed on failure below
 */
2545 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2549 status = ICE_ERR_NO_MEMORY;
/* Keep a private copy of the actions on the entry */
2551 e->acts = (struct ice_flow_action *)
2552 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2553 ICE_NONDMA_TO_NONDMA);
2557 e->acts_cnt = acts_cnt;
2559 /* Format the matching data */
/* Working buffers are sized by the scenario width (bytes of match key) */
2560 buf_sz = prof->cfg.scen->width;
2561 buf = (u8 *)ice_malloc(hw, buf_sz);
2565 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2569 /* 'key' buffer will store both key and key_inverse, so must be twice
2572 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2576 range_buf = (struct ice_aqc_acl_profile_ranges *)
2577 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2581 /* Set don't care mask to all 1's to start, will zero out used bytes */
2582 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Format each matched field of each segment into buf/dontcare; range
 * fields go to the range-checker buffer instead
 */
2584 for (i = 0; i < prof->segs_cnt; i++) {
2585 struct ice_flow_seg_info *seg = &prof->segs[i];
2588 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2589 ICE_FLOW_FIELD_IDX_MAX) {
2590 struct ice_flow_fld_info *info = &seg->fields[j];
2592 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2593 ice_flow_acl_frmt_entry_range(j, info,
2597 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw matches: straight byte copy, optional per-byte inverse mask */
2601 for (j = 0; j < seg->raws_cnt; j++) {
2602 struct ice_flow_fld_info *info = &seg->raws[j].info;
2603 u16 dst, src, mask, k;
2604 bool use_mask = false;
2606 src = info->src.val;
2607 dst = info->entry.val -
2608 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2609 mask = info->src.mask;
2611 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2614 for (k = 0; k < info->entry.last; k++, dst++) {
2615 buf[dst] = data[src++];
2617 dontcare[dst] = ~data[mask++];
/* The profile ID byte is always matched exactly (dontcare = 0) */
2624 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2625 dontcare[prof->cfg.scen->pid_idx] = 0;
2627 /* Format the buffer for direction flags */
2628 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2630 if (prof->dir == ICE_FLOW_RX)
2631 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* Range-checker usage byte: match used checkers, ignore unused ones */
2634 buf[prof->cfg.scen->rng_chk_idx] = range;
2635 /* Mark any unused range checkers as don't care */
2636 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2637 e->range_buf = range_buf;
2639 ice_free(hw, range_buf);
/* Build the TCAM key/key_inverse pair from value + dontcare mask */
2642 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2648 e->entry_sz = buf_sz * 2;
/* Cleanup path: temporary buffers are always freed; on error also
 * release the range buffer, the copied actions, and any ACL counter
 */
2655 ice_free(hw, dontcare);
2660 if (status && range_buf) {
2661 ice_free(hw, range_buf);
2662 e->range_buf = NULL;
2665 if (status && e->acts) {
2666 ice_free(hw, e->acts);
2671 if (status && cnt_alloc)
2672 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2678 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2679 * the compared data.
2680 * @prof: pointer to flow profile
2681 * @e: pointer to the comparing flow entry
2682 * @do_chg_action: decide if we want to change the ACL action
2683 * @do_add_entry: decide if we want to add the new ACL entry
2684 * @do_rem_entry: decide if we want to remove the current ACL entry
2686 * Find an ACL scenario entry that matches the compared data. In the same time,
2687 * this function also figure out:
2688 * a/ If we want to change the ACL action
2689 * b/ If we want to add the new ACL entry
2690 * c/ If we want to remove the current ACL entry
2692 static struct ice_flow_entry *
2693 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2694 struct ice_flow_entry *e, bool *do_chg_action,
2695 bool *do_add_entry, bool *do_rem_entry)
2697 struct ice_flow_entry *p, *return_entry = NULL;
2701 * a/ There exists an entry with same matching data, but different
2702 * priority, then we remove this existing ACL entry. Then, we
2703 * will add the new entry to the ACL scenario.
2704 * b/ There exists an entry with same matching data, priority, and
2705 * result action, then we do nothing
2706 * c/ There exists an entry with same matching data, priority, but
2707 * different, action, then do only change the action's entry.
2708 * d/ Else, we add this new entry to the ACL scenario.
/* Default decision: brand-new entry (case d) */
2710 *do_chg_action = false;
2711 *do_add_entry = true;
2712 *do_rem_entry = false;
2713 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* Skip entries whose key/key_inverse bytes differ */
2714 if (memcmp(p->entry, e->entry, p->entry_sz))
2717 /* From this point, we have the same matching_data. */
2718 *do_add_entry = false;
2721 if (p->priority != e->priority) {
2722 /* matching data && !priority */
2723 *do_add_entry = true;
2724 *do_rem_entry = true;
2728 /* From this point, we will have matching_data && priority */
2729 if (p->acts_cnt != e->acts_cnt)
2730 *do_chg_action = true;
/* Compare action lists element-wise; any action of p with no exact
 * counterpart in e forces an action update
 */
2731 for (i = 0; i < p->acts_cnt; i++) {
2732 bool found_not_match = false;
2734 for (j = 0; j < e->acts_cnt; j++)
2735 if (memcmp(&p->acts[i], &e->acts[j],
2736 sizeof(struct ice_flow_action))) {
2737 found_not_match = true;
2741 if (found_not_match) {
2742 *do_chg_action = true;
2747 /* (do_chg_action = true) means :
2748 * matching_data && priority && !result_action
2749 * (do_chg_action = false) means :
2750 * matching_data && priority && result_action
/* Returns the matching existing entry, or NULL when none matched */
2755 return return_entry;
2759 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
/* Maps the generic flow priority enum onto the ACL-specific priority enum;
 * unknown values fall back to NORMAL (default case, label lost in
 * extraction).
 */
2762 static enum ice_acl_entry_prio
2763 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2765 enum ice_acl_entry_prio acl_prio;
2768 case ICE_FLOW_PRIO_LOW:
2769 acl_prio = ICE_ACL_PRIO_LOW;
2771 case ICE_FLOW_PRIO_NORMAL:
2772 acl_prio = ICE_ACL_PRIO_NORMAL;
2774 case ICE_FLOW_PRIO_HIGH:
2775 acl_prio = ICE_ACL_PRIO_HIGH;
2778 acl_prio = ICE_ACL_PRIO_NORMAL;
2786 * ice_flow_acl_union_rng_chk - Perform union operation between two
2787 * range-range checker buffers
2788 * @dst_buf: pointer to destination range checker buffer
2789 * @src_buf: pointer to source range checker buffer
2791 * For this function, we do the union between dst_buf and src_buf
2792 * range checker buffer, and we will save the result back to dst_buf
2794 static enum ice_status
2795 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2796 struct ice_aqc_acl_profile_ranges *src_buf)
2800 if (!dst_buf || !src_buf)
2801 return ICE_ERR_BAD_PTR;
/* For each source checker config, find a destination slot to merge into */
2803 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2804 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2805 bool will_populate = false;
2807 in_data = &src_buf->checker_cfg[i];
/* A dst slot is usable if it is empty (mask == 0) or already holds
 * an identical config (dedup)
 */
2812 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2813 cfg_data = &dst_buf->checker_cfg[j];
2815 if (!cfg_data->mask ||
2816 !memcmp(cfg_data, in_data,
2817 sizeof(struct ice_acl_rng_data))) {
2818 will_populate = true;
2823 if (will_populate) {
2824 ice_memcpy(cfg_data, in_data,
2825 sizeof(struct ice_acl_rng_data),
2826 ICE_NONDMA_TO_NONDMA);
2828 /* No available slot left to program range checker */
2829 return ICE_ERR_MAX_LIMIT;
2837 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2838 * @hw: pointer to the hardware structure
2839 * @prof: pointer to flow profile
2840 * @entry: double pointer to the flow entry
2842 * For this function, we will look at the current added entries in the
2843 * corresponding ACL scenario. Then, we will perform matching logic to
2844 * see if we want to add/modify/do nothing with this new entry.
2846 static enum ice_status
2847 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2848 struct ice_flow_entry **entry)
2850 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2851 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2852 struct ice_acl_act_entry *acts = NULL;
2853 struct ice_flow_entry *exist;
2854 enum ice_status status = ICE_SUCCESS;
2855 struct ice_flow_entry *e;
2858 if (!entry || !(*entry) || !prof)
2859 return ICE_ERR_BAD_PTR;
2863 do_chg_rng_chk = false;
/* Range-checker handling only runs when the new entry carries a
 * range buffer (guard condition lost in extraction)
 */
2867 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2872 /* Query the current range-checker value in FW */
2873 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2877 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2878 sizeof(struct ice_aqc_acl_profile_ranges),
2879 ICE_NONDMA_TO_NONDMA);
2881 /* Generate the new range-checker value */
2882 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2886 /* Reconfigure the range check if the buffer is changed. */
2887 do_chg_rng_chk = false;
2888 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2889 sizeof(struct ice_aqc_acl_profile_ranges))) {
2890 status = ice_prog_acl_prof_ranges(hw, prof_id,
2891 &cfg_rng_buf, NULL);
2895 do_chg_rng_chk = true;
2899 /* Figure out if we want to (change the ACL action) and/or
2900 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2902 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2903 &do_add_entry, &do_rem_entry);
/* do_rem_entry path: drop the stale existing entry first */
2905 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2910 /* Prepare the result action buffer */
/* NOTE(review): ice_calloc sized by e->entry_sz here; upstream uses the
 * action count — verify, looks suspicious but lines are missing.
 */
2911 acts = (struct ice_acl_act_entry *)
2912 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry))
2914 return ICE_ERR_NO_MEMORY;
2916 for (i = 0; i < e->acts_cnt; i++)
2917 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2918 sizeof(struct ice_acl_act_entry),
2919 ICE_NONDMA_TO_NONDMA);
/* do_add_entry path: program key/key_inverse + actions into the scenario */
2922 enum ice_acl_entry_prio prio;
2926 keys = (u8 *)e->entry;
2927 inverts = keys + (e->entry_sz / 2);
2928 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
2930 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
2931 inverts, acts, e->acts_cnt,
2936 e->scen_entry_idx = entry_idx;
2937 LIST_ADD(&e->l_entry, &prof->entries);
2939 if (do_chg_action) {
2940 /* For the action memory info, update the SW's copy of
2941 * exist entry with e's action memory info
2943 ice_free(hw, exist->acts);
2944 exist->acts_cnt = e->acts_cnt;
2945 exist->acts = (struct ice_flow_action *)
2946 ice_calloc(hw, exist->acts_cnt,
2947 sizeof(struct ice_flow_action));
2949 status = ICE_ERR_NO_MEMORY;
2953 ice_memcpy(exist->acts, e->acts,
2954 sizeof(struct ice_flow_action) * e->acts_cnt,
2955 ICE_NONDMA_TO_NONDMA);
/* Reprogram only the action memory of the existing HW entry */
2957 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2959 exist->scen_entry_idx);
2964 if (do_chg_rng_chk) {
2965 /* In this case, we want to update the range checker
2966 * information of the exist entry
2968 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2974 /* As we don't add the new entry to our SW DB, deallocate its
2975 * memories, and return the exist entry to the caller
2977 ice_dealloc_flow_entry(hw, e);
2987 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2988 * @hw: pointer to the hardware structure
2989 * @prof: pointer to flow profile
2990 * @e: double pointer to the flow entry
/* Lock-taking wrapper around ice_flow_acl_add_scen_entry_sync() */
2992 static enum ice_status
2993 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2994 struct ice_flow_entry **e)
2996 enum ice_status status;
2998 ice_acquire_lock(&prof->entries_lock);
2999 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3000 ice_release_lock(&prof->entries_lock);
3006 * ice_flow_add_entry - Add a flow entry
3007 * @hw: pointer to the HW struct
3008 * @blk: classification stage
3009 * @prof_id: ID of the profile to add a new flow entry to
3010 * @entry_id: unique ID to identify this flow entry
3011 * @vsi_handle: software VSI handle for the flow entry
3012 * @prio: priority of the flow entry
3013 * @data: pointer to a data buffer containing flow entry's match values/masks
3014 * @acts: arrays of actions to be performed on a match
3015 * @acts_cnt: number of actions
3016 * @entry_h: pointer to buffer that receives the new flow entry's handle
3019 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3020 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3021 void *data, struct ice_flow_action *acts, u8 acts_cnt,
3024 struct ice_flow_entry *e = NULL;
3025 struct ice_flow_prof *prof;
3026 enum ice_status status = ICE_SUCCESS;
3028 /* ACL entries must indicate an action */
3029 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3030 return ICE_ERR_PARAM;
3032 /* No flow entry data is expected for RSS */
3033 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3034 return ICE_ERR_BAD_PTR;
3036 if (!ice_is_vsi_valid(hw, vsi_handle))
3037 return ICE_ERR_PARAM;
3039 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3041 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3043 status = ICE_ERR_DOES_NOT_EXIST;
3045 /* Allocate memory for the entry being added and associate
3046 * the VSI to the found flow profile
3048 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3050 status = ICE_ERR_NO_MEMORY;
3052 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
/* Profile-list lock is dropped before block-specific entry setup */
3055 ice_release_lock(&hw->fl_profs_locks[blk]);
3060 e->vsi_handle = vsi_handle;
/* ACL branch (switch arm; surrounding case labels lost in extraction):
 * format key + actions, then insert into the scenario
 */
3069 /* ACL will handle the entry management */
3070 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3075 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Unsupported block types fall through to NOT_IMPL */
3081 status = ICE_ERR_NOT_IMPL;
3085 if (blk != ICE_BLK_ACL) {
3086 /* ACL will handle the entry management */
3087 ice_acquire_lock(&prof->entries_lock);
3088 LIST_ADD(&e->l_entry, &prof->entries);
3089 ice_release_lock(&prof->entries_lock);
/* Hand back an opaque handle to the new entry */
3092 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* Error-cleanup path: free the partially-built entry */
3097 ice_free(hw, e->entry);
3105 * ice_flow_rem_entry - Remove a flow entry
3106 * @hw: pointer to the HW struct
3107 * @blk: classification stage
3108 * @entry_h: handle to the flow entry to be removed
3110 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3113 struct ice_flow_entry *entry;
3114 struct ice_flow_prof *prof;
3115 enum ice_status status = ICE_SUCCESS;
3117 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3118 return ICE_ERR_PARAM;
/* Handle is an encoded pointer; decode back to the entry */
3120 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3122 /* Retain the pointer to the flow profile as the entry will be freed */
/* Removal itself happens under the profile's entries_lock via the
 * _sync variant; entry memory is gone after the call
 */
3126 ice_acquire_lock(&prof->entries_lock);
3127 status = ice_flow_rem_entry_sync(hw, blk, entry);
3128 ice_release_lock(&prof->entries_lock);
3135 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3136 * @seg: packet segment the field being set belongs to
3137 * @fld: field to be set
3138 * @field_type: type of the field
3139 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3140 * entry's input buffer
3141 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3143 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3144 * entry's input buffer
3146 * This helper function stores information of a field being matched, including
3147 * the type of the field and the locations of the value to match, the mask, and
3148 * the upper-bound value in the start of the input buffer for a flow entry.
3149 * This function should only be used for fixed-size data structures.
3151 * This function also opportunistically determines the protocol headers to be
3152 * present based on the fields being set. Some fields cannot be used alone to
3153 * determine the protocol headers present. Sometimes, fields for particular
3154 * protocol headers are not matched. In those cases, the protocol headers
3155 * must be explicitly set.
3158 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3159 enum ice_flow_fld_match_type field_type, u16 val_loc,
3160 u16 mask_loc, u16 last_loc)
3162 u64 bit = BIT_ULL(fld);
3165 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3168 seg->fields[fld].type = field_type;
3169 seg->fields[fld].src.val = val_loc;
3170 seg->fields[fld].src.mask = mask_loc;
3171 seg->fields[fld].src.last = last_loc;
3173 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3177 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3178 * @seg: packet segment the field being set belongs to
3179 * @fld: field to be set
3180 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3181 * entry's input buffer
3182 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3184 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3185 * entry's input buffer
3186 * @range: indicate if field being matched is to be in a range
3188 * This function specifies the locations, in the form of byte offsets from the
3189 * start of the input buffer for a flow entry, from where the value to match,
3190 * the mask value, and upper value can be extracted. These locations are then
3191 * stored in the flow profile. When adding a flow entry associated with the
3192 * flow profile, these locations will be used to quickly extract the values and
3193 * create the content of a match entry. This function should only be used for
3194 * fixed-size data structures.
3197 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3198 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3200 enum ice_flow_fld_match_type t = range ?
3201 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3203 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3207 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3208 * @seg: packet segment the field being set belongs to
3209 * @fld: field to be set
3210 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3211 * entry's input buffer
3212 * @pref_loc: location of prefix value from entry's input buffer
3213 * @pref_sz: size of the location holding the prefix value
3215 * This function specifies the locations, in the form of byte offsets from the
3216 * start of the input buffer for a flow entry, from where the value to match
3217 * and the IPv4 prefix value can be extracted. These locations are then stored
3218 * in the flow profile. When adding flow entries to the associated flow profile,
3219 * these locations can be used to quickly extract the values to create the
3220 * content of a match entry. This function should only be used for fixed-size
3224 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3225 u16 val_loc, u16 pref_loc, u8 pref_sz)
3227 /* For this type of field, the "mask" location is for the prefix value's
3228 * location and the "last" location is for the size of the location of
3231 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3232 pref_loc, (u16)pref_sz);
3236 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3237 * @seg: packet segment the field being set belongs to
3238 * @off: offset of the raw field from the beginning of the segment in bytes
3239 * @len: length of the raw pattern to be matched
3240 * @val_loc: location of the value to match from entry's input buffer
3241 * @mask_loc: location of mask value from entry's input buffer
3243 * This function specifies the offset of the raw field to be match from the
3244 * beginning of the specified packet segment, and the locations, in the form of
3245 * byte offsets from the start of the input buffer for a flow entry, from where
3246 * the value to match and the mask value to be extracted. These locations are
3247 * then stored in the flow profile. When adding flow entries to the associated
3248 * flow profile, these locations can be used to quickly extract the values to
3249 * create the content of a match entry. This function should only be used for
3250 * fixed-size data structures.
3253 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3254 u16 val_loc, u16 mask_loc)
3256 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3257 seg->raws[seg->raws_cnt].off = off;
3258 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3259 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3260 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3261 /* The "last" field is used to store the length of the field */
3262 seg->raws[seg->raws_cnt].info.src.last = len;
3265 /* Overflows of "raws" will be handled as an error condition later in
3266 * the flow when this information is processed.
/* Header-bit groupings used to validate an RSS segment configuration */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header bits that may legally appear in an RSS segment */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3286 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3287 * @segs: pointer to the flow field segment(s)
3288 * @seg_cnt: segment count
3289 * @cfg: configure parameters
3291 * Helper function to extract fields from hash bitmap and use flow
3292 * header value to set flow field segment for further use in flow
3293 * profile entry or removal.
3295 static enum ice_status
3296 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3297 const struct ice_rss_hash_cfg *cfg)
3299 struct ice_flow_seg_info *seg;
3303 /* set inner most segment */
3304 seg = &segs[seg_cnt - 1];
3306 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3307 ICE_FLOW_FIELD_IDX_MAX)
3308 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3309 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3310 ICE_FLOW_FLD_OFF_INVAL, false);
3312 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3314 /* set outer most header */
3315 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3316 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3317 ICE_FLOW_SEG_HDR_IPV_OTHER;
3318 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3319 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3320 ICE_FLOW_SEG_HDR_IPV_OTHER;
3322 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3323 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3324 return ICE_ERR_PARAM;
3326 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3327 if (val && !ice_is_pow2(val))
3330 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3331 if (val && !ice_is_pow2(val))
3338 * ice_rem_vsi_rss_list - remove VSI from RSS list
3339 * @hw: pointer to the hardware structure
3340 * @vsi_handle: software VSI handle
3342 * Remove the VSI from all RSS configurations in the list.
3344 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3346 struct ice_rss_cfg *r, *tmp;
3348 if (LIST_EMPTY(&hw->rss_list_head))
3351 ice_acquire_lock(&hw->rss_locks);
3352 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3353 ice_rss_cfg, l_entry)
3354 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3355 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3356 LIST_DEL(&r->l_entry);
3359 ice_release_lock(&hw->rss_locks);
3363 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3364 * @hw: pointer to the hardware structure
3365 * @vsi_handle: software VSI handle
3367 * This function will iterate through all flow profiles and disassociate
3368 * the VSI from that profile. If the flow profile has no VSIs it will
3371 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3373 const enum ice_block blk = ICE_BLK_RSS;
3374 struct ice_flow_prof *p, *t;
3375 enum ice_status status = ICE_SUCCESS;
3377 if (!ice_is_vsi_valid(hw, vsi_handle))
3378 return ICE_ERR_PARAM;
3380 if (LIST_EMPTY(&hw->fl_profs[blk]))
3383 ice_acquire_lock(&hw->rss_locks);
3384 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3386 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3387 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3391 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3392 status = ice_flow_rem_prof(hw, blk, p->id);
3397 ice_release_lock(&hw->rss_locks);
3403 * ice_get_rss_hdr_type - get a RSS profile's header type
3404 * @prof: RSS flow profile
3406 static enum ice_rss_cfg_hdr_type
3407 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3409 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3411 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3412 hdr_type = ICE_RSS_OUTER_HEADERS;
3413 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3414 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3415 hdr_type = ICE_RSS_INNER_HEADERS;
3416 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3417 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3418 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3419 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3426 * ice_rem_rss_list - remove RSS configuration from list
3427 * @hw: pointer to the hardware structure
3428 * @vsi_handle: software VSI handle
3429 * @prof: pointer to flow profile
3431 * Assumption: lock has already been acquired for RSS list
3434 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3436 enum ice_rss_cfg_hdr_type hdr_type;
3437 struct ice_rss_cfg *r, *tmp;
3439 /* Search for RSS hash fields associated to the VSI that match the
3440 * hash configurations associated to the flow profile. If found
3441 * remove from the RSS entry list of the VSI context and delete entry.
3443 hdr_type = ice_get_rss_hdr_type(prof);
3444 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3445 ice_rss_cfg, l_entry)
3446 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3447 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3448 r->hash.hdr_type == hdr_type) {
3449 ice_clear_bit(vsi_handle, r->vsis);
3450 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3451 LIST_DEL(&r->l_entry);
3459 * ice_add_rss_list - add RSS configuration to list
3460 * @hw: pointer to the hardware structure
3461 * @vsi_handle: software VSI handle
3462 * @prof: pointer to flow profile
3464 * Assumption: lock has already been acquired for RSS list
3466 static enum ice_status
3467 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3469 enum ice_rss_cfg_hdr_type hdr_type;
3470 struct ice_rss_cfg *r, *rss_cfg;
3472 hdr_type = ice_get_rss_hdr_type(prof);
3473 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3474 ice_rss_cfg, l_entry)
3475 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3476 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3477 r->hash.hdr_type == hdr_type) {
3478 ice_set_bit(vsi_handle, r->vsis);
3482 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3484 return ICE_ERR_NO_MEMORY;
3486 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3487 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3488 rss_cfg->hash.hdr_type = hdr_type;
3489 rss_cfg->hash.symm = prof->cfg.symm;
3490 ice_set_bit(vsi_handle, rss_cfg->vsis);
3492 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	62
#define ICE_FLOW_PROF_ENCAP_M	(0x3ULL << ICE_FLOW_PROF_ENCAP_S)

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:61] - Protocol header
 * [62:63] - Encapsulation flag:
 *	     0 if non-tunneled
 *	     1 if tunneled
 *	     2 for tunneled with outer ipv4
 *	     3 for tunneled with outer ipv6
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))
3519 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3521 u32 s = ((src % 4) << 3); /* byte shift */
3522 u32 v = dst | 0x80; /* value to program */
3523 u8 i = src / 4; /* register index */
3526 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3527 reg = (reg & ~(0xff << s)) | (v << s);
3528 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3532 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3535 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3538 for (i = 0; i < len; i++) {
3539 ice_rss_config_xor_word(hw, prof_id,
3540 /* Yes, field vector in GLQF_HSYMM and
3541 * GLQF_HINSET is inversed!
3543 fv_last_word - (src + i),
3544 fv_last_word - (dst + i));
3545 ice_rss_config_xor_word(hw, prof_id,
3546 fv_last_word - (dst + i),
3547 fv_last_word - (src + i));
3552 ice_rss_update_symm(struct ice_hw *hw,
3553 struct ice_flow_prof *prof)
3555 struct ice_prof_map *map;
3558 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3559 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3561 prof_id = map->prof_id;
3562 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3565 /* clear to default */
3566 for (m = 0; m < 6; m++)
3567 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3568 if (prof->cfg.symm) {
3569 struct ice_flow_seg_info *seg =
3570 &prof->segs[prof->segs_cnt - 1];
3572 struct ice_flow_seg_xtrct *ipv4_src =
3573 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3574 struct ice_flow_seg_xtrct *ipv4_dst =
3575 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3576 struct ice_flow_seg_xtrct *ipv6_src =
3577 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3578 struct ice_flow_seg_xtrct *ipv6_dst =
3579 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3581 struct ice_flow_seg_xtrct *tcp_src =
3582 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3583 struct ice_flow_seg_xtrct *tcp_dst =
3584 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3586 struct ice_flow_seg_xtrct *udp_src =
3587 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3588 struct ice_flow_seg_xtrct *udp_dst =
3589 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3591 struct ice_flow_seg_xtrct *sctp_src =
3592 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3593 struct ice_flow_seg_xtrct *sctp_dst =
3594 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3597 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3598 ice_rss_config_xor(hw, prof_id,
3599 ipv4_src->idx, ipv4_dst->idx, 2);
3602 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3603 ice_rss_config_xor(hw, prof_id,
3604 ipv6_src->idx, ipv6_dst->idx, 8);
3607 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3608 ice_rss_config_xor(hw, prof_id,
3609 tcp_src->idx, tcp_dst->idx, 1);
3612 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3613 ice_rss_config_xor(hw, prof_id,
3614 udp_src->idx, udp_dst->idx, 1);
3617 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3618 ice_rss_config_xor(hw, prof_id,
3619 sctp_src->idx, sctp_dst->idx, 1);
3624 * ice_add_rss_cfg_sync - add an RSS configuration
3625 * @hw: pointer to the hardware structure
3626 * @vsi_handle: software VSI handle
3627 * @cfg: configure parameters
3629 * Assumption: lock has already been acquired for RSS list
3631 static enum ice_status
3632 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3633 const struct ice_rss_hash_cfg *cfg)
3635 const enum ice_block blk = ICE_BLK_RSS;
3636 struct ice_flow_prof *prof = NULL;
3637 struct ice_flow_seg_info *segs;
3638 enum ice_status status;
3641 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3642 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3644 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3647 return ICE_ERR_NO_MEMORY;
3649 /* Construct the packet segment info from the hashed fields */
3650 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3654 /* Don't do RSS for GTPU Outer */
3655 if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
3656 segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3657 status = ICE_SUCCESS;
3661 /* Search for a flow profile that has matching headers, hash fields
3662 * and has the input VSI associated to it. If found, no further
3663 * operations required and exit.
3665 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3667 ICE_FLOW_FIND_PROF_CHK_FLDS |
3668 ICE_FLOW_FIND_PROF_CHK_VSI);
3670 if (prof->cfg.symm == cfg->symm)
3672 prof->cfg.symm = cfg->symm;
3676 /* Check if a flow profile exists with the same protocol headers and
3677 * associated with the input VSI. If so disassociate the VSI from
3678 * this profile. The VSI will be added to a new profile created with
3679 * the protocol header and new hash field configuration.
3681 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3682 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3684 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3686 ice_rem_rss_list(hw, vsi_handle, prof);
3690 /* Remove profile if it has no VSIs associated */
3691 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3692 status = ice_flow_rem_prof(hw, blk, prof->id);
3698 /* Search for a profile that has same match fields only. If this
3699 * exists then associate the VSI to this profile.
3701 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3703 ICE_FLOW_FIND_PROF_CHK_FLDS);
3705 if (prof->cfg.symm == cfg->symm) {
3706 status = ice_flow_assoc_prof(hw, blk, prof,
3709 status = ice_add_rss_list(hw, vsi_handle,
3712 /* if a profile exist but with different symmetric
3713 * requirement, just return error.
3715 status = ICE_ERR_NOT_SUPPORTED;
3720 /* Create a new flow profile with generated profile and packet
3721 * segment information.
3723 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3724 ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3725 segs[segs_cnt - 1].hdrs,
3727 segs, segs_cnt, NULL, 0, &prof);
3731 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3732 /* If association to a new flow profile failed then this profile can
3736 ice_flow_rem_prof(hw, blk, prof->id);
3740 status = ice_add_rss_list(hw, vsi_handle, prof);
3742 prof->cfg.symm = cfg->symm;
3744 ice_rss_update_symm(hw, prof);
3752 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3753 * @hw: pointer to the hardware structure
3754 * @vsi_handle: software VSI handle
3755 * @cfg: configure parameters
3757 * This function will generate a flow profile based on fields associated with
3758 * the input fields to hash on, the flow type and use the VSI number to add
3759 * a flow entry to the profile.
3762 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3763 const struct ice_rss_hash_cfg *cfg)
3765 struct ice_rss_hash_cfg local_cfg;
3766 enum ice_status status;
3768 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3769 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3770 cfg->hash_flds == ICE_HASH_INVALID)
3771 return ICE_ERR_PARAM;
3774 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3775 ice_acquire_lock(&hw->rss_locks);
3776 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3777 ice_release_lock(&hw->rss_locks);
3779 ice_acquire_lock(&hw->rss_locks);
3780 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3781 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3783 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3784 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3787 ice_release_lock(&hw->rss_locks);
3794 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3795 * @hw: pointer to the hardware structure
3796 * @vsi_handle: software VSI handle
3797 * @cfg: configure parameters
3799 * Assumption: lock has already been acquired for RSS list
3801 static enum ice_status
3802 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3803 const struct ice_rss_hash_cfg *cfg)
3805 const enum ice_block blk = ICE_BLK_RSS;
3806 struct ice_flow_seg_info *segs;
3807 struct ice_flow_prof *prof;
3808 enum ice_status status;
3811 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3812 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3813 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3816 return ICE_ERR_NO_MEMORY;
3818 /* Construct the packet segment info from the hashed fields */
3819 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3823 /* Don't do RSS for GTPU Outer */
3824 if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
3825 segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3826 status = ICE_SUCCESS;
3830 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3832 ICE_FLOW_FIND_PROF_CHK_FLDS);
3834 status = ICE_ERR_DOES_NOT_EXIST;
3838 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3842 /* Remove RSS configuration from VSI context before deleting
3845 ice_rem_rss_list(hw, vsi_handle, prof);
3847 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3848 status = ice_flow_rem_prof(hw, blk, prof->id);
3856 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3857 * @hw: pointer to the hardware structure
3858 * @vsi_handle: software VSI handle
3859 * @cfg: configure parameters
3861 * This function will lookup the flow profile based on the input
3862 * hash field bitmap, iterate through the profile entry list of
3863 * that profile and find entry associated with input VSI to be
3864 * removed. Calls are made to underlying flow apis which will in
3865 * turn build or update buffers for RSS XLT1 section.
3868 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3869 const struct ice_rss_hash_cfg *cfg)
3871 struct ice_rss_hash_cfg local_cfg;
3872 enum ice_status status;
3874 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3875 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3876 cfg->hash_flds == ICE_HASH_INVALID)
3877 return ICE_ERR_PARAM;
3879 ice_acquire_lock(&hw->rss_locks);
3881 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3882 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3884 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3885 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3888 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3889 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
3893 ice_release_lock(&hw->rss_locks);
3899 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3900 * @hw: pointer to the hardware structure
3901 * @vsi_handle: software VSI handle
3903 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3905 enum ice_status status = ICE_SUCCESS;
3906 struct ice_rss_cfg *r;
3908 if (!ice_is_vsi_valid(hw, vsi_handle))
3909 return ICE_ERR_PARAM;
3911 ice_acquire_lock(&hw->rss_locks);
3912 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3913 ice_rss_cfg, l_entry) {
3914 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3915 status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
3920 ice_release_lock(&hw->rss_locks);
3926 * ice_get_rss_cfg - returns hashed fields for the given header types
3927 * @hw: pointer to the hardware structure
3928 * @vsi_handle: software VSI handle
3929 * @hdrs: protocol header type
3931 * This function will return the match fields of the first instance of flow
3932 * profile having the given header types and containing input VSI
3934 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3936 u64 rss_hash = ICE_HASH_INVALID;
3937 struct ice_rss_cfg *r;
3939 /* verify if the protocol header is non zero and VSI is valid */
3940 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3941 return ICE_HASH_INVALID;
3943 ice_acquire_lock(&hw->rss_locks);
3944 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3945 ice_rss_cfg, l_entry)
3946 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3947 r->hash.addl_hdrs == hdrs) {
3948 rss_hash = r->hash.hash_flds;
3951 ice_release_lock(&hw->rss_locks);