/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */
5 #include "ice_common.h"
/* Size of known protocol header fields, in bytes */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IPV4_ID		2
#define ICE_FLOW_FLD_SZ_IPV6_ID		4
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI	4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID	2
38 /* Describe properties of a protocol header field */
39 struct ice_flow_field_info {
40 enum ice_flow_seg_hdr hdr;
41 s16 off; /* Offset from start of a protocol header, in bits */
42 u16 size; /* Size of fields in bits */
43 u16 mask; /* 16-bit mask for field */
46 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
48 .off = (_offset_bytes) * BITS_PER_BYTE, \
49 .size = (_size_bytes) * BITS_PER_BYTE, \
53 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
55 .off = (_offset_bytes) * BITS_PER_BYTE, \
56 .size = (_size_bytes) * BITS_PER_BYTE, \
60 /* Table containing properties of supported protocol header fields */
62 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
64 /* ICE_FLOW_FIELD_IDX_ETH_DA */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
66 /* ICE_FLOW_FIELD_IDX_ETH_SA */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
68 /* ICE_FLOW_FIELD_IDX_S_VLAN */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
70 /* ICE_FLOW_FIELD_IDX_C_VLAN */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
72 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
73 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
75 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
76 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
78 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
79 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
81 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
82 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
83 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
84 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
85 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
86 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
87 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
88 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
89 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
90 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
91 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
92 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
93 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
101 /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
103 ICE_FLOW_FLD_SZ_IPV4_ID),
104 /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
105 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
106 ICE_FLOW_FLD_SZ_IPV6_ID),
107 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
109 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
110 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
112 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
113 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
116 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
119 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
120 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
122 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
126 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
130 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
131 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
132 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
134 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
135 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
136 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
138 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
139 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
141 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
143 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
144 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
145 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
146 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
147 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
148 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
149 /* ICE_FLOW_FIELD_IDX_ARP_OP */
150 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
152 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
154 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
155 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
157 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
158 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
160 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
162 ICE_FLOW_FLD_SZ_GTP_TEID),
163 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
164 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
165 ICE_FLOW_FLD_SZ_GTP_TEID),
166 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
167 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
168 ICE_FLOW_FLD_SZ_GTP_TEID),
169 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
170 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
171 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
172 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
173 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
174 ICE_FLOW_FLD_SZ_GTP_TEID),
175 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
176 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
177 ICE_FLOW_FLD_SZ_GTP_TEID),
179 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
181 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
183 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
184 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
185 ICE_FLOW_FLD_SZ_PFCP_SEID),
187 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
188 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
189 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
191 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
192 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
193 ICE_FLOW_FLD_SZ_ESP_SPI),
195 /* ICE_FLOW_FIELD_IDX_AH_SPI */
196 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
197 ICE_FLOW_FLD_SZ_AH_SPI),
199 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
200 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
201 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
202 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
203 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
204 ICE_FLOW_FLD_SZ_VXLAN_VNI),
206 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
207 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
208 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
210 /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
211 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
212 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
215 /* Bitmaps indicating relevant packet types for a particular protocol header
217 * Packet types for packets with an Outer/First/Single MAC header
219 static const u32 ice_ptypes_mac_ofos[] = {
220 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
221 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
222 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
223 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
224 0x00000000, 0x00000000, 0x00000000, 0x00000000,
225 0x00000000, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 /* Packet types for packets with an Innermost/Last MAC VLAN header */
231 static const u32 ice_ptypes_macvlan_il[] = {
232 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
233 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
234 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 0x00000000, 0x00000000, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
243 * does NOT include IPV4 other PTYPEs
245 static const u32 ice_ptypes_ipv4_ofos[] = {
246 0x1D800000, 0x24000800, 0x00000000, 0x00000000,
247 0x00000000, 0x00000155, 0x00000000, 0x00000000,
248 0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
249 0x00001500, 0x00000000, 0x00000000, 0x00000000,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
257 * includes IPV4 other PTYPEs
259 static const u32 ice_ptypes_ipv4_ofos_all[] = {
260 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
261 0x00000000, 0x00000155, 0x00000000, 0x00000000,
262 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
263 0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 /* Packet types for packets with an Innermost/Last IPv4 header */
271 static const u32 ice_ptypes_ipv4_il[] = {
272 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
273 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
274 0x00000000, 0x00000000, 0x001FF800, 0x00100000,
275 0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
283 * does NOT include IVP6 other PTYPEs
285 static const u32 ice_ptypes_ipv6_ofos[] = {
286 0x00000000, 0x00000000, 0x76000000, 0x10002000,
287 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
288 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
289 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
297 * includes IPV6 other PTYPEs
299 static const u32 ice_ptypes_ipv6_ofos_all[] = {
300 0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
301 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
302 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
303 0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 /* Packet types for packets with an Innermost/Last IPv6 header */
311 static const u32 ice_ptypes_ipv6_il[] = {
312 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
313 0x00000770, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
315 0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 /* Packet types for packets with an Outer/First/Single
323 * non-frag IPv4 header - no L4
325 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
326 0x10800000, 0x04000800, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
329 0x00001500, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
337 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
338 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
339 0x00000008, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00139800, 0x00000000,
341 0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 /* Packet types for packets with an Outer/First/Single
349 * non-frag IPv6 header - no L4
351 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
352 0x00000000, 0x00000000, 0x42000000, 0x10002000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x02300000, 0x00000540, 0x00000000,
355 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
363 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
364 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
365 0x00000430, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
367 0x02300000, 0x00000023, 0x00000000, 0x00000000,
368 0x00000000, 0x00000000, 0x00000000, 0x00000000,
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 /* Packet types for packets with an Outermost/First ARP header */
375 static const u32 ice_ptypes_arp_of[] = {
376 0x00000800, 0x00000000, 0x00000000, 0x00000000,
377 0x00000000, 0x00000000, 0x00000000, 0x00000000,
378 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 0x00000000, 0x00000000, 0x00000000, 0x00000000,
380 0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000000, 0x00000000,
386 /* UDP Packet types for non-tunneled packets or tunneled
387 * packets with inner UDP.
389 static const u32 ice_ptypes_udp_il[] = {
390 0x81000000, 0x20204040, 0x04000010, 0x80810102,
391 0x00000040, 0x00000000, 0x00000000, 0x00000000,
392 0x00000000, 0x00410000, 0x908427E0, 0x00100007,
393 0x10410000, 0x00000004, 0x10410410, 0x00004104,
394 0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 0x00000000, 0x00000000, 0x00000000, 0x00000000,
400 /* Packet types for packets with an Innermost/Last TCP header */
401 static const u32 ice_ptypes_tcp_il[] = {
402 0x04000000, 0x80810102, 0x10000040, 0x02040408,
403 0x00000102, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x00820000, 0x21084000, 0x00000000,
405 0x20820000, 0x00000008, 0x20820820, 0x00008208,
406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 0x00000000, 0x00000000, 0x00000000, 0x00000000,
412 /* Packet types for packets with an Innermost/Last SCTP header */
413 static const u32 ice_ptypes_sctp_il[] = {
414 0x08000000, 0x01020204, 0x20000081, 0x04080810,
415 0x00000204, 0x00000000, 0x00000000, 0x00000000,
416 0x00000000, 0x01040000, 0x00000000, 0x00000000,
417 0x41040000, 0x00000010, 0x00000000, 0x00000000,
418 0x00000000, 0x00000000, 0x00000000, 0x00000000,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 0x00000000, 0x00000000, 0x00000000, 0x00000000,
424 /* Packet types for packets with an Outermost/First ICMP header */
425 static const u32 ice_ptypes_icmp_of[] = {
426 0x10000000, 0x00000000, 0x00000000, 0x00000000,
427 0x00000000, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
430 0x00000000, 0x00000000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
436 /* Packet types for packets with an Innermost/Last ICMP header */
437 static const u32 ice_ptypes_icmp_il[] = {
438 0x00000000, 0x02040408, 0x40000102, 0x08101020,
439 0x00000408, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x42108000, 0x00000000,
441 0x82080000, 0x00000020, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 /* Packet types for packets with an Outermost/First GRE header */
449 static const u32 ice_ptypes_gre_of[] = {
450 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
451 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 /* Packet types for packets with an Innermost/Last MAC header */
461 static const u32 ice_ptypes_mac_il[] = {
462 0x00000000, 0x20000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 /* Packet types for GTPC */
473 static const u32 ice_ptypes_gtpc[] = {
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 /* Packet types for VXLAN with VNI */
485 static const u32 ice_ptypes_vxlan_vni[] = {
486 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
487 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 /* Packet types for GTPC with TEID */
497 static const u32 ice_ptypes_gtpc_tid[] = {
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000060, 0x00000000,
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000000, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 /* Packet types for GTPU */
509 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
510 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
511 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
512 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
513 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
514 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
515 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
516 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
517 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
518 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
519 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
520 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
521 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
522 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
523 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
524 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
525 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
526 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
527 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
528 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
529 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
530 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
531 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
532 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
533 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
534 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
535 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
536 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
537 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
538 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
539 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
540 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
541 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
542 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
543 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
544 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
545 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
546 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
547 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
548 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
549 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
550 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
551 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
552 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
553 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
554 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
555 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
556 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
557 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
558 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
559 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
560 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
561 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
562 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
563 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
564 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
565 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
566 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
567 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
568 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
569 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
572 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
573 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
574 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
575 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
576 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
577 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
578 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
579 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
580 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
581 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
582 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
583 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
584 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
585 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
586 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
587 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
588 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
589 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
590 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
591 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
592 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
593 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
594 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
595 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
596 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
597 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
598 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
599 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
600 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
601 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
602 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
603 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
604 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
605 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
606 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
607 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
608 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
609 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
610 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
611 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
612 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
613 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
614 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
615 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
616 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
617 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
618 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
619 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
620 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
621 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
622 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
623 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
624 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
625 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
626 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
627 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
628 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
629 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
630 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
631 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
632 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
635 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
636 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
637 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
638 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
639 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
640 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
641 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
642 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
643 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
644 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
645 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
646 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
647 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
648 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
649 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
650 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
651 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
652 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
653 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
654 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
655 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
656 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
657 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
658 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
659 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
660 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
661 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
662 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
663 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
664 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
665 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
666 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
667 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
668 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
669 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
670 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
671 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
672 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
673 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
674 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
675 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
676 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
677 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
678 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
679 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
680 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
681 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
682 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
683 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
684 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
685 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
686 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
687 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
688 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
689 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
690 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
691 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
692 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
693 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
694 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
695 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
/* PTYPE/attribute pairs used to tag GTP-U packet types as uplink; selected by
 * ice_flow_proc_seg_hdrs when a segment requests ICE_FLOW_SEG_HDR_GTPU_UP.
 */
698 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
699 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
700 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
701 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
702 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
703 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
704 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
705 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
706 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
707 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
708 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
709 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
710 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
711 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
712 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
713 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
714 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
715 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
716 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
717 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
718 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
/* Tunneled (outer IPv4/IPv6 encapsulated) GTP-U uplink PTYPEs follow */
719 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
720 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
721 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
722 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
723 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
724 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
725 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
726 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
727 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
728 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
729 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
730 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
731 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
732 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
733 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
734 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
735 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
736 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
737 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
738 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
739 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
740 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
741 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
742 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
743 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
744 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
745 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
746 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
747 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
748 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
749 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
750 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
751 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
752 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
753 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
754 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
755 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
756 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
757 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
758 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
/* Packet types for GTP-U; one bit per PTYPE, 32 PTYPEs per u32 word. Shared
 * by the GTPU_IP/GTPU_EH/GTPU_UP/GTPU_DWN branches of ice_flow_proc_seg_hdrs
 * (only the params->attr selection differs between those branches).
 */
761 static const u32 ice_ptypes_gtpu[] = {
762 0x00000000, 0x00000000, 0x00000000, 0x00000000,
763 0x00000000, 0x00000000, 0x00000000, 0x00000000,
764 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
765 0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
766 0x00000000, 0x00000000, 0x00000000, 0x00000000,
767 0x00000000, 0x00000000, 0x00000000, 0x00000000,
768 0x00000000, 0x00000000, 0x00000000, 0x00000000,
769 0x00000000, 0x00000000, 0x00000000, 0x00000000,
772 /* Packet types for pppoe */
/* Used both to keep PPPoE PTYPEs (AND) when the segment requests PPPOE and
 * to strip them (AND-NOT) when it does not — see ice_flow_proc_seg_hdrs.
 */
773 static const u32 ice_ptypes_pppoe[] = {
774 0x00000000, 0x00000000, 0x00000000, 0x00000000,
775 0x00000000, 0x00000000, 0x00000000, 0x00000000,
776 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
777 0x00000000, 0x00000000, 0x00000000, 0x00000000,
778 0x00000000, 0x00000000, 0x00000000, 0x00000000,
779 0x00000000, 0x00000000, 0x00000000, 0x00000000,
780 0x00000000, 0x00000000, 0x00000000, 0x00000000,
781 0x00000000, 0x00000000, 0x00000000, 0x00000000,
784 /* Packet types for packets with PFCP NODE header */
/* Also ANDed out of the profile's PTYPE set when PFCP is not requested
 * (see the PFCP handling in ice_flow_proc_seg_hdrs).
 */
785 static const u32 ice_ptypes_pfcp_node[] = {
786 0x00000000, 0x00000000, 0x00000000, 0x00000000,
787 0x00000000, 0x00000000, 0x00000000, 0x00000000,
788 0x00000000, 0x00000000, 0x80000000, 0x00000002,
789 0x00000000, 0x00000000, 0x00000000, 0x00000000,
790 0x00000000, 0x00000000, 0x00000000, 0x00000000,
791 0x00000000, 0x00000000, 0x00000000, 0x00000000,
792 0x00000000, 0x00000000, 0x00000000, 0x00000000,
793 0x00000000, 0x00000000, 0x00000000, 0x00000000,
796 /* Packet types for packets with PFCP SESSION header */
/* Also ANDed out of the profile's PTYPE set when PFCP is not requested
 * (see the PFCP handling in ice_flow_proc_seg_hdrs).
 */
797 static const u32 ice_ptypes_pfcp_session[] = {
798 0x00000000, 0x00000000, 0x00000000, 0x00000000,
799 0x00000000, 0x00000000, 0x00000000, 0x00000000,
800 0x00000000, 0x00000000, 0x00000000, 0x00000005,
801 0x00000000, 0x00000000, 0x00000000, 0x00000000,
802 0x00000000, 0x00000000, 0x00000000, 0x00000000,
803 0x00000000, 0x00000000, 0x00000000, 0x00000000,
804 0x00000000, 0x00000000, 0x00000000, 0x00000000,
805 0x00000000, 0x00000000, 0x00000000, 0x00000000,
808 /* Packet types for l2tpv3 */
/* PTYPE bitmap ANDed into the profile for ICE_FLOW_SEG_HDR_L2TPV3 */
809 static const u32 ice_ptypes_l2tpv3[] = {
810 0x00000000, 0x00000000, 0x00000000, 0x00000000,
811 0x00000000, 0x00000000, 0x00000000, 0x00000000,
812 0x00000000, 0x00000000, 0x00000000, 0x00000300,
813 0x00000000, 0x00000000, 0x00000000, 0x00000000,
814 0x00000000, 0x00000000, 0x00000000, 0x00000000,
815 0x00000000, 0x00000000, 0x00000000, 0x00000000,
816 0x00000000, 0x00000000, 0x00000000, 0x00000000,
817 0x00000000, 0x00000000, 0x00000000, 0x00000000,
820 /* Packet types for esp */
/* PTYPE bitmap ANDed into the profile for ICE_FLOW_SEG_HDR_ESP */
821 static const u32 ice_ptypes_esp[] = {
822 0x00000000, 0x00000000, 0x00000000, 0x00000000,
823 0x00000000, 0x00000003, 0x00000000, 0x00000000,
824 0x00000000, 0x00000000, 0x00000000, 0x00000000,
825 0x00000000, 0x00000000, 0x00000000, 0x00000000,
826 0x00000000, 0x00000000, 0x00000000, 0x00000000,
827 0x00000000, 0x00000000, 0x00000000, 0x00000000,
828 0x00000000, 0x00000000, 0x00000000, 0x00000000,
829 0x00000000, 0x00000000, 0x00000000, 0x00000000,
832 /* Packet types for ah */
/* PTYPE bitmap ANDed into the profile for ICE_FLOW_SEG_HDR_AH */
833 static const u32 ice_ptypes_ah[] = {
834 0x00000000, 0x00000000, 0x00000000, 0x00000000,
835 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
836 0x00000000, 0x00000000, 0x00000000, 0x00000000,
837 0x00000000, 0x00000000, 0x00000000, 0x00000000,
838 0x00000000, 0x00000000, 0x00000000, 0x00000000,
839 0x00000000, 0x00000000, 0x00000000, 0x00000000,
840 0x00000000, 0x00000000, 0x00000000, 0x00000000,
841 0x00000000, 0x00000000, 0x00000000, 0x00000000,
844 /* Packet types for packets with NAT_T ESP header */
/* PTYPE bitmap ANDed into the profile for ICE_FLOW_SEG_HDR_NAT_T_ESP
 * (ESP carried over UDP, i.e. NAT traversal encapsulation)
 */
845 static const u32 ice_ptypes_nat_t_esp[] = {
846 0x00000000, 0x00000000, 0x00000000, 0x00000000,
847 0x00000000, 0x00000030, 0x00000000, 0x00000000,
848 0x00000000, 0x00000000, 0x00000000, 0x00000000,
849 0x00000000, 0x00000000, 0x00000000, 0x00000000,
850 0x00000000, 0x00000000, 0x00000000, 0x00000000,
851 0x00000000, 0x00000000, 0x00000000, 0x00000000,
852 0x00000000, 0x00000000, 0x00000000, 0x00000000,
853 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for outermost non-IP Ethernet frames; ANDed into the profile
 * for ICE_FLOW_SEG_HDR_ETH_NON_IP (see ice_flow_proc_seg_hdrs).
 */
856 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
857 0x00000846, 0x00000000, 0x00000000, 0x00000000,
858 0x00000000, 0x00000000, 0x00000000, 0x00000000,
859 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
860 0x00000000, 0x00000000, 0x00000000, 0x00000000,
861 0x00000000, 0x00000000, 0x00000000, 0x00000000,
862 0x00000000, 0x00000000, 0x00000000, 0x00000000,
863 0x00000000, 0x00000000, 0x00000000, 0x00000000,
864 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTP-U without an inner IP payload; ANDed into the profile
 * for ICE_FLOW_SEG_HDR_GTPU_NON_IP (see ice_flow_proc_seg_hdrs).
 */
867 static const u32 ice_ptypes_gtpu_no_ip[] = {
868 0x00000000, 0x00000000, 0x00000000, 0x00000000,
869 0x00000000, 0x00000000, 0x00000000, 0x00000000,
870 0x00000000, 0x00000000, 0x00000600, 0x00000000,
871 0x00000000, 0x00000000, 0x00000000, 0x00000000,
872 0x00000000, 0x00000000, 0x00000000, 0x00000000,
873 0x00000000, 0x00000000, 0x00000000, 0x00000000,
874 0x00000000, 0x00000000, 0x00000000, 0x00000000,
875 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for eCPRI over Ethernet (transport path 0); ANDed into the
 * profile for ICE_FLOW_SEG_HDR_ECPRI_TP0 (see ice_flow_proc_seg_hdrs).
 */
878 static const u32 ice_ptypes_ecpri_tp0[] = {
879 0x00000000, 0x00000000, 0x00000000, 0x00000000,
880 0x00000000, 0x00000000, 0x00000000, 0x00000000,
881 0x00000000, 0x00000000, 0x00000000, 0x00000400,
882 0x00000000, 0x00000000, 0x00000000, 0x00000000,
883 0x00000000, 0x00000000, 0x00000000, 0x00000000,
884 0x00000000, 0x00000000, 0x00000000, 0x00000000,
885 0x00000000, 0x00000000, 0x00000000, 0x00000000,
886 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for eCPRI carried over UDP (transport path 0); ANDed into the
 * profile for ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 (see ice_flow_proc_seg_hdrs).
 */
889 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
890 0x00000000, 0x00000000, 0x00000000, 0x00000000,
891 0x00000000, 0x00000000, 0x00000000, 0x00000000,
892 0x00000000, 0x00000000, 0x00000000, 0x00100000,
893 0x00000000, 0x00000000, 0x00000000, 0x00000000,
894 0x00000000, 0x00000000, 0x00000000, 0x00000000,
895 0x00000000, 0x00000000, 0x00000000, 0x00000000,
896 0x00000000, 0x00000000, 0x00000000, 0x00000000,
897 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for L2TPv2; ANDed into the profile for
 * ICE_FLOW_SEG_HDR_L2TPV2 (see ice_flow_proc_seg_hdrs).
 */
900 static const u32 ice_ptypes_l2tpv2[] = {
901 0x00000000, 0x00000000, 0x00000000, 0x00000000,
902 0x00000000, 0x00000000, 0x00000000, 0x00000000,
903 0x00000000, 0x00000000, 0x00000000, 0x00000000,
904 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
905 0x00000000, 0x00000000, 0x00000000, 0x00000000,
906 0x00000000, 0x00000000, 0x00000000, 0x00000000,
907 0x00000000, 0x00000000, 0x00000000, 0x00000000,
908 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for PPP; ANDed into the profile for ICE_FLOW_SEG_HDR_PPP
 * (applied in addition to the other header filters — see
 * ice_flow_proc_seg_hdrs). Note the bit pattern is a subset of the
 * ice_ptypes_l2tpv2 range above.
 */
911 static const u32 ice_ptypes_ppp[] = {
912 0x00000000, 0x00000000, 0x00000000, 0x00000000,
913 0x00000000, 0x00000000, 0x00000000, 0x00000000,
914 0x00000000, 0x00000000, 0x00000000, 0x00000000,
915 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
916 0x00000000, 0x00000000, 0x00000000, 0x00000000,
917 0x00000000, 0x00000000, 0x00000000, 0x00000000,
918 0x00000000, 0x00000000, 0x00000000, 0x00000000,
919 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for fragmented IPv4; selected when a segment requests
 * IPV4 together with ICE_FLOW_SEG_HDR_IPV_FRAG (see ice_flow_proc_seg_hdrs).
 */
922 static const u32 ice_ptypes_ipv4_frag[] = {
923 0x00400000, 0x00000000, 0x00000000, 0x00000000,
924 0x00000000, 0x00000000, 0x00000000, 0x00000000,
925 0x00000000, 0x00000000, 0x00000000, 0x00000000,
926 0x00000000, 0x00000000, 0x00000000, 0x00000000,
927 0x00000000, 0x00000000, 0x00000000, 0x00000000,
928 0x00000000, 0x00000000, 0x00000000, 0x00000000,
929 0x00000000, 0x00000000, 0x00000000, 0x00000000,
930 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for fragmented IPv6; selected when a segment requests
 * IPV6 together with ICE_FLOW_SEG_HDR_IPV_FRAG (see ice_flow_proc_seg_hdrs).
 */
933 static const u32 ice_ptypes_ipv6_frag[] = {
934 0x00000000, 0x00000000, 0x01000000, 0x00000000,
935 0x00000000, 0x00000000, 0x00000000, 0x00000000,
936 0x00000000, 0x00000000, 0x00000000, 0x00000000,
937 0x00000000, 0x00000000, 0x00000000, 0x00000000,
938 0x00000000, 0x00000000, 0x00000000, 0x00000000,
939 0x00000000, 0x00000000, 0x00000000, 0x00000000,
940 0x00000000, 0x00000000, 0x00000000, 0x00000000,
941 0x00000000, 0x00000000, 0x00000000, 0x00000000,
944 /* Manage parameters and info. used during the creation of a flow profile */
945 struct ice_flow_prof_params {
947 u16 entry_length; /* # of bytes formatted entry will require */
/* profile under construction; its segs[] drive PTYPE/extraction processing */
949 struct ice_flow_prof *prof;
951 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
952 * This will give us the direction flags.
/* extraction sequence entries, filled by ice_flow_xtract_*() */
954 struct ice_fv_word es[ICE_MAX_FV_WORDS];
955 /* attributes can be used to add attributes to a particular PTYPE */
956 const struct ice_ptype_attributes *attr;
/* NOTE(review): presumably per-es[] match masks — usage is outside this view,
 * confirm against ice_flow_xtract_fld/ice_add_prof callers.
 */
959 u16 mask[ICE_MAX_FV_WORDS];
/* set of PTYPEs the profile applies to; computed by ice_flow_proc_seg_hdrs */
960 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* NOTE(review): header flags that appear to denote tunneled/inner protocol
 * headers for RSS purposes — confirm against the RSS configuration callers.
 */
963 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
964 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
965 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
966 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
967 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
968 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
969 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
970 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
972 #define ICE_FLOW_SEG_HDRS_L2_MASK \
973 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
974 #define ICE_FLOW_SEG_HDRS_L3_MASK \
975 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
976 ICE_FLOW_SEG_HDR_ARP)
977 #define ICE_FLOW_SEG_HDRS_L4_MASK \
978 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
979 ICE_FLOW_SEG_HDR_SCTP)
980 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
981 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
982 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
985 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
986 * @segs: array of one or more packet segments that describe the flow
987 * @segs_cnt: number of packet segments provided
989 static enum ice_status
990 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
994 for (i = 0; i < segs_cnt; i++) {
995 /* Multiple L3 headers */
/* ice_is_pow2() on the masked bits means "exactly one bit set", so a
 * segment may select at most one L3 header type...
 */
996 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
997 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
998 return ICE_ERR_PARAM;
1000 /* Multiple L4 headers */
/* ...and likewise at most one L4 header type */
1001 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
1002 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
1003 return ICE_ERR_PARAM;
1009 /* Sizes of fixed known protocol headers without header options */
1010 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
1011 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
1012 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
1013 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
1014 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
1015 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
1016 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
1017 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
1018 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
1021 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
1022 * @params: information about the flow to be processed
1023 * @seg: index of packet segment whose header size is to be determined
1025 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2: Ethernet, plus a single VLAN tag if the segment requests one */
1030 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
1031 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3: exactly one of IPv4/IPv6/ARP (enforced by ice_flow_val_hdrs) */
1034 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
1035 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
1036 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
1037 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
1038 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
1039 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
1040 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
1041 /* A L3 header is required if L4 is specified */
/* L4: at most one of ICMP/TCP/UDP/SCTP */
1045 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
1046 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
1047 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
1048 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
1049 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
1050 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
1051 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
1052 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
1058 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
1059 * @params: information about the flow to be processed
1061 * This function identifies the packet types associated with the protocol
1062 * headers being present in packet segments of the specified flow profile.
1064 static enum ice_status
1065 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
1067 struct ice_flow_prof *prof;
/* Start with every PTYPE bit set; each requested header below narrows the
 * set with ice_and_bitmap() so the result is the intersection of all
 * selected header filters.
 */
1070 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
1073 prof = params->prof;
1075 for (i = 0; i < params->prof->segs_cnt; i++) {
1076 const ice_bitmap_t *src;
1079 hdrs = prof->segs[i].hdrs;
/* Throughout: "!i" means the outermost segment, which uses the
 * *_ofos (outer-first) tables; inner segments use the *_il tables.
 */
1081 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
1082 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
1083 (const ice_bitmap_t *)ice_ptypes_mac_il;
1084 ice_and_bitmap(params->ptypes, params->ptypes, src,
1085 ICE_FLOW_PTYPE_MAX);
1088 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
1089 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
1090 ice_and_bitmap(params->ptypes, params->ptypes, src,
1091 ICE_FLOW_PTYPE_MAX);
1094 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
1095 ice_and_bitmap(params->ptypes, params->ptypes,
1096 (const ice_bitmap_t *)ice_ptypes_arp_of,
1097 ICE_FLOW_PTYPE_MAX);
1100 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
1101 src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
1102 ice_and_bitmap(params->ptypes, params->ptypes, src,
1103 ICE_FLOW_PTYPE_MAX);
/* L3 selection: the IPV_OTHER and IPV_FRAG modifiers take priority
 * over the plain IPV4/IPV6 cases; the *_no_l4 tables are used when
 * no TCP/UDP/SCTP header was requested for the segment.
 */
1105 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1106 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1108 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
1109 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
1110 ice_and_bitmap(params->ptypes, params->ptypes, src,
1111 ICE_FLOW_PTYPE_MAX);
1112 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1113 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1115 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
1116 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
1117 ice_and_bitmap(params->ptypes, params->ptypes, src,
1118 ICE_FLOW_PTYPE_MAX);
1119 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1120 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1121 src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
1122 ice_and_bitmap(params->ptypes, params->ptypes, src,
1123 ICE_FLOW_PTYPE_MAX);
1124 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1125 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1126 src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
1127 ice_and_bitmap(params->ptypes, params->ptypes, src,
1128 ICE_FLOW_PTYPE_MAX);
1129 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1130 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1131 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
1132 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
1133 ice_and_bitmap(params->ptypes, params->ptypes, src,
1134 ICE_FLOW_PTYPE_MAX);
1135 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1136 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
1137 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
1138 ice_and_bitmap(params->ptypes, params->ptypes, src,
1139 ICE_FLOW_PTYPE_MAX);
1140 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1141 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1142 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
1143 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
1144 ice_and_bitmap(params->ptypes, params->ptypes, src,
1145 ICE_FLOW_PTYPE_MAX);
1146 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1147 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
1148 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
1149 ice_and_bitmap(params->ptypes, params->ptypes, src,
1150 ICE_FLOW_PTYPE_MAX);
1153 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
1154 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
1155 ice_and_bitmap(params->ptypes, params->ptypes,
1156 src, ICE_FLOW_PTYPE_MAX);
1157 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
1158 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1159 ice_and_bitmap(params->ptypes, params->ptypes, src,
1160 ICE_FLOW_PTYPE_MAX);
/* PPPoE not requested: explicitly remove PPPoE-only PTYPEs */
1162 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1163 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1164 ICE_FLOW_PTYPE_MAX);
/* L4 selection: at most one of UDP/TCP/SCTP per segment */
1167 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1168 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1169 ice_and_bitmap(params->ptypes, params->ptypes, src,
1170 ICE_FLOW_PTYPE_MAX);
1171 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1172 ice_and_bitmap(params->ptypes, params->ptypes,
1173 (const ice_bitmap_t *)ice_ptypes_tcp_il,
1174 ICE_FLOW_PTYPE_MAX);
1175 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1176 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1177 ice_and_bitmap(params->ptypes, params->ptypes, src,
1178 ICE_FLOW_PTYPE_MAX);
/* Tunnel / special protocol selection (mutually exclusive chain).
 * The four GTPU variants share the same PTYPE bitmap; they differ
 * only in which attribute table is attached to the profile.
 */
1181 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1182 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1183 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1184 ice_and_bitmap(params->ptypes, params->ptypes, src,
1185 ICE_FLOW_PTYPE_MAX);
1186 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1187 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1188 ice_and_bitmap(params->ptypes, params->ptypes, src,
1189 ICE_FLOW_PTYPE_MAX);
1190 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1191 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1192 ice_and_bitmap(params->ptypes, params->ptypes,
1193 src, ICE_FLOW_PTYPE_MAX);
1194 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1195 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1196 ice_and_bitmap(params->ptypes, params->ptypes,
1197 src, ICE_FLOW_PTYPE_MAX);
1198 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1199 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1200 ice_and_bitmap(params->ptypes, params->ptypes,
1201 src, ICE_FLOW_PTYPE_MAX);
1202 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1203 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1204 ice_and_bitmap(params->ptypes, params->ptypes,
1205 src, ICE_FLOW_PTYPE_MAX);
1207 /* Attributes for GTP packet with downlink */
1208 params->attr = ice_attr_gtpu_down;
1209 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1210 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1211 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1212 ice_and_bitmap(params->ptypes, params->ptypes,
1213 src, ICE_FLOW_PTYPE_MAX);
1215 /* Attributes for GTP packet with uplink */
1216 params->attr = ice_attr_gtpu_up;
1217 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1218 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1219 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1220 ice_and_bitmap(params->ptypes, params->ptypes,
1221 src, ICE_FLOW_PTYPE_MAX);
1223 /* Attributes for GTP packet with Extension Header */
1224 params->attr = ice_attr_gtpu_eh;
1225 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1226 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1227 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1228 ice_and_bitmap(params->ptypes, params->ptypes,
1229 src, ICE_FLOW_PTYPE_MAX);
1231 /* Attributes for GTP packet without Extension Header */
1232 params->attr = ice_attr_gtpu_session;
1233 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1234 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1235 src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1236 ice_and_bitmap(params->ptypes, params->ptypes,
1237 src, ICE_FLOW_PTYPE_MAX);
1238 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1239 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1240 ice_and_bitmap(params->ptypes, params->ptypes,
1241 src, ICE_FLOW_PTYPE_MAX);
1242 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1243 src = (const ice_bitmap_t *)ice_ptypes_esp;
1244 ice_and_bitmap(params->ptypes, params->ptypes,
1245 src, ICE_FLOW_PTYPE_MAX);
1246 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1247 src = (const ice_bitmap_t *)ice_ptypes_ah;
1248 ice_and_bitmap(params->ptypes, params->ptypes,
1249 src, ICE_FLOW_PTYPE_MAX);
1250 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1251 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1252 ice_and_bitmap(params->ptypes, params->ptypes,
1253 src, ICE_FLOW_PTYPE_MAX);
1254 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1255 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1256 ice_and_bitmap(params->ptypes, params->ptypes,
1257 src, ICE_FLOW_PTYPE_MAX);
1258 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1259 src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1260 ice_and_bitmap(params->ptypes, params->ptypes,
1261 src, ICE_FLOW_PTYPE_MAX);
1264 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1265 src = (const ice_bitmap_t *)ice_ptypes_ppp;
1266 ice_and_bitmap(params->ptypes, params->ptypes,
1267 src, ICE_FLOW_PTYPE_MAX);
1270 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1271 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1273 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1276 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1278 ice_and_bitmap(params->ptypes, params->ptypes,
1279 src, ICE_FLOW_PTYPE_MAX);
/* PFCP not requested: strip both PFCP NODE and SESSION PTYPEs */
1281 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1282 ice_andnot_bitmap(params->ptypes, params->ptypes,
1283 src, ICE_FLOW_PTYPE_MAX);
1285 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1286 ice_andnot_bitmap(params->ptypes, params->ptypes,
1287 src, ICE_FLOW_PTYPE_MAX);
1295 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1296 * @hw: pointer to the HW struct
1297 * @params: information about the flow to be processed
1298 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1300 * This function will allocate an extraction sequence entries for a DWORD size
1301 * chunk of the packet flags.
1303 static enum ice_status
1304 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1305 struct ice_flow_prof_params *params,
1306 enum ice_flex_mdid_pkt_flags flags)
/* field vector width of this block determines how many entries fit */
1308 u8 fv_words = hw->blk[params->blk].es.fvw;
1311 /* Make sure the number of extraction sequence entries required does not
1312 * exceed the block's capacity.
1314 if (params->es_cnt >= fv_words)
1315 return ICE_ERR_MAX_LIMIT;
1317 /* some blocks require a reversed field vector layout */
1318 if (hw->blk[params->blk].es.reverse)
1319 idx = fv_words - params->es_cnt - 1;
1321 idx = params->es_cnt;
/* META_ID entry extracts metadata (packet flags) rather than a packet
 * header field; the flags enum value doubles as the extraction offset.
 */
1323 params->es[idx].prot_id = ICE_PROT_META_ID;
1324 params->es[idx].off = flags;
1331 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1332 * @hw: pointer to the HW struct
1333 * @params: information about the flow to be processed
1334 * @seg: packet segment index of the field to be extracted
1335 * @fld: ID of field to be extracted
1336 * @match: bitfield of all fields
1338 * This function determines the protocol ID, offset, and size of the given
1339 * field. It then allocates one or more extraction sequence entries for the
1340 * given field, and fill the entries with protocol ID and offset information.
1342 static enum ice_status
1343 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1344 u8 seg, enum ice_flow_field fld, u64 match)
1346 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1347 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1348 u8 fv_words = hw->blk[params->blk].es.fvw;
1349 struct ice_flow_fld_info *flds;
1350 u16 cnt, ese_bits, i;
1355 flds = params->prof->segs[seg].fields;
1358 case ICE_FLOW_FIELD_IDX_ETH_DA:
1359 case ICE_FLOW_FIELD_IDX_ETH_SA:
1360 case ICE_FLOW_FIELD_IDX_S_VLAN:
1361 case ICE_FLOW_FIELD_IDX_C_VLAN:
1362 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1364 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1365 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1367 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1368 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1370 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1371 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1373 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1374 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1375 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1377 /* TTL and PROT share the same extraction seq. entry.
1378 * Each is considered a sibling to the other in terms of sharing
1379 * the same extraction sequence entry.
1381 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1382 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1384 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1386 /* If the sibling field is also included, that field's
1387 * mask needs to be included.
1389 if (match & BIT(sib))
1390 sib_mask = ice_flds_info[sib].mask;
1392 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1393 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1394 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1396 /* TTL and PROT share the same extraction seq. entry.
1397 * Each is considered a sibling to the other in terms of sharing
1398 * the same extraction sequence entry.
1400 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1401 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1403 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1405 /* If the sibling field is also included, that field's
1406 * mask needs to be included.
1408 if (match & BIT(sib))
1409 sib_mask = ice_flds_info[sib].mask;
1411 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1412 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1413 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1414 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1415 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1417 prot_id = ICE_PROT_IPV4_IL_IL;
1419 case ICE_FLOW_FIELD_IDX_IPV4_ID:
1420 prot_id = ICE_PROT_IPV4_OF_OR_S;
1422 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1423 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1424 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1425 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1426 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1427 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1428 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1429 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1430 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1431 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1432 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1434 prot_id = ICE_PROT_IPV6_IL_IL;
1436 case ICE_FLOW_FIELD_IDX_IPV6_ID:
1437 prot_id = ICE_PROT_IPV6_FRAG;
1439 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1440 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1441 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1442 prot_id = ICE_PROT_TCP_IL;
1444 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1445 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1446 prot_id = ICE_PROT_UDP_IL_OR_S;
1448 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1449 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1450 prot_id = ICE_PROT_SCTP_IL;
1452 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1453 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1454 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1455 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1456 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1457 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1458 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1459 /* GTP is accessed through UDP OF protocol */
1460 prot_id = ICE_PROT_UDP_OF;
1462 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1463 prot_id = ICE_PROT_PPPOE;
1465 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1466 prot_id = ICE_PROT_UDP_IL_OR_S;
1468 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1469 prot_id = ICE_PROT_L2TPV3;
1471 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1472 prot_id = ICE_PROT_ESP_F;
1474 case ICE_FLOW_FIELD_IDX_AH_SPI:
1475 prot_id = ICE_PROT_ESP_2;
1477 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1478 prot_id = ICE_PROT_UDP_IL_OR_S;
1480 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1481 prot_id = ICE_PROT_ECPRI;
1483 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1484 prot_id = ICE_PROT_UDP_IL_OR_S;
1486 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1487 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1488 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1489 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1490 case ICE_FLOW_FIELD_IDX_ARP_OP:
1491 prot_id = ICE_PROT_ARP_OF;
1493 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1494 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1495 /* ICMP type and code share the same extraction seq. entry */
1496 prot_id = (params->prof->segs[seg].hdrs &
1497 ICE_FLOW_SEG_HDR_IPV4) ?
1498 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1499 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1500 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1501 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1503 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1504 prot_id = ICE_PROT_GRE_OF;
1507 return ICE_ERR_NOT_IMPL;
1510 /* Each extraction sequence entry is a word in size, and extracts a
1511 * word-aligned offset from a protocol header.
1513 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1515 flds[fld].xtrct.prot_id = prot_id;
1516 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1517 ICE_FLOW_FV_EXTRACT_SZ;
1518 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1519 flds[fld].xtrct.idx = params->es_cnt;
1520 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1522 /* Adjust the next field-entry index after accommodating the number of
1523 * entries this field consumes
1525 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1526 ice_flds_info[fld].size, ese_bits);
1528 /* Fill in the extraction sequence entries needed for this field */
1529 off = flds[fld].xtrct.off;
1530 mask = flds[fld].xtrct.mask;
1531 for (i = 0; i < cnt; i++) {
1532 /* Only consume an extraction sequence entry if there is no
1533 * sibling field associated with this field or the sibling entry
1534 * already extracts the word shared with this field.
1536 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1537 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1538 flds[sib].xtrct.off != off) {
1541 /* Make sure the number of extraction sequence required
1542 * does not exceed the block's capability
1544 if (params->es_cnt >= fv_words)
1545 return ICE_ERR_MAX_LIMIT;
1547 /* some blocks require a reversed field vector layout */
1548 if (hw->blk[params->blk].es.reverse)
1549 idx = fv_words - params->es_cnt - 1;
1551 idx = params->es_cnt;
1553 params->es[idx].prot_id = prot_id;
1554 params->es[idx].off = off;
1555 params->mask[idx] = mask | sib_mask;
1559 off += ICE_FLOW_FV_EXTRACT_SZ;
1566 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1567 * @hw: pointer to the HW struct
1568 * @params: information about the flow to be processed
1569 * @seg: index of packet segment whose raw fields are to be extracted
1571 static enum ice_status
1572 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1579 if (!params->prof->segs[seg].raws_cnt)
1582 if (params->prof->segs[seg].raws_cnt >
1583 ARRAY_SIZE(params->prof->segs[seg].raws))
1584 return ICE_ERR_MAX_LIMIT;
1586 /* Offsets within the segment headers are not supported */
1587 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1589 return ICE_ERR_PARAM;
1591 fv_words = hw->blk[params->blk].es.fvw;
1593 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1594 struct ice_flow_seg_fld_raw *raw;
1597 raw = ¶ms->prof->segs[seg].raws[i];
1599 /* Storing extraction information */
1600 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1601 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1602 ICE_FLOW_FV_EXTRACT_SZ;
1603 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1605 raw->info.xtrct.idx = params->es_cnt;
1607 /* Determine the number of field vector entries this raw field
1610 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1611 (raw->info.src.last * BITS_PER_BYTE),
1612 (ICE_FLOW_FV_EXTRACT_SZ *
1614 off = raw->info.xtrct.off;
1615 for (j = 0; j < cnt; j++) {
1618 /* Make sure the number of extraction sequence required
1619 * does not exceed the block's capability
1621 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1622 params->es_cnt >= ICE_MAX_FV_WORDS)
1623 return ICE_ERR_MAX_LIMIT;
1625 /* some blocks require a reversed field vector layout */
1626 if (hw->blk[params->blk].es.reverse)
1627 idx = fv_words - params->es_cnt - 1;
1629 idx = params->es_cnt;
1631 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1632 params->es[idx].off = off;
1634 off += ICE_FLOW_FV_EXTRACT_SZ;
1642 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1643 * @hw: pointer to the HW struct
1644 * @params: information about the flow to be processed
1646 * This function iterates through all matched fields in the given segments, and
1647 * creates an extraction sequence for the fields.
1649 static enum ice_status
1650 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1651 struct ice_flow_prof_params *params)
1653 enum ice_status status = ICE_SUCCESS;
1656 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1659 if (params->blk == ICE_BLK_ACL) {
1660 status = ice_flow_xtract_pkt_flags(hw, params,
1661 ICE_RX_MDID_PKT_FLAGS_15_0);
1666 for (i = 0; i < params->prof->segs_cnt; i++) {
1667 u64 match = params->prof->segs[i].match;
1668 enum ice_flow_field j;
1670 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1671 ICE_FLOW_FIELD_IDX_MAX) {
1672 status = ice_flow_xtract_fld(hw, params, i, j, match);
1675 ice_clear_bit(j, (ice_bitmap_t *)&match);
1678 /* Process raw matching bytes */
1679 status = ice_flow_xtract_raws(hw, params, i);
1688 * ice_flow_sel_acl_scen - returns the specific scenario
1689 * @hw: pointer to the hardware structure
1690 * @params: information about the flow to be processed
1692 * This function will return the specific scenario based on the
1693 * params passed to it
1695 static enum ice_status
1696 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1698 /* Find the best-fit scenario for the provided match width */
1699 struct ice_acl_scen *cand_scen = NULL, *scen;
1702 return ICE_ERR_DOES_NOT_EXIST;
1704 /* Loop through each scenario and match against the scenario width
1705 * to select the specific scenario
1707 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1708 if (scen->eff_width >= params->entry_length &&
1709 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1712 return ICE_ERR_DOES_NOT_EXIST;
1714 params->prof->cfg.scen = cand_scen;
1720 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1721 * @params: information about the flow to be processed
1723 static enum ice_status
1724 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1726 u16 index, i, range_idx = 0;
1728 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1730 for (i = 0; i < params->prof->segs_cnt; i++) {
1731 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1734 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1735 ICE_FLOW_FIELD_IDX_MAX) {
1736 struct ice_flow_fld_info *fld = &seg->fields[j];
1738 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1740 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1741 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1743 /* Range checking only supported for single
1746 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1748 BITS_PER_BYTE * 2) > 1)
1749 return ICE_ERR_PARAM;
1751 /* Ranges must define low and high values */
1752 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1753 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1754 return ICE_ERR_PARAM;
1756 fld->entry.val = range_idx++;
1758 /* Store adjusted byte-length of field for later
1759 * use, taking into account potential
1760 * non-byte-aligned displacement
1762 fld->entry.last = DIVIDE_AND_ROUND_UP
1763 (ice_flds_info[j].size +
1764 (fld->xtrct.disp % BITS_PER_BYTE),
1766 fld->entry.val = index;
1767 index += fld->entry.last;
1771 for (j = 0; j < seg->raws_cnt; j++) {
1772 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1774 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1775 raw->info.entry.val = index;
1776 raw->info.entry.last = raw->info.src.last;
1777 index += raw->info.entry.last;
1781 /* Currently only support using the byte selection base, which only
1782 * allows for an effective entry size of 30 bytes. Reject anything
1785 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1786 return ICE_ERR_PARAM;
1788 /* Only 8 range checkers per profile, reject anything trying to use
1791 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1792 return ICE_ERR_PARAM;
1794 /* Store # bytes required for entry for later use */
1795 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1801 * ice_flow_proc_segs - process all packet segments associated with a profile
1802 * @hw: pointer to the HW struct
1803 * @params: information about the flow to be processed
1805 static enum ice_status
1806 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1808 enum ice_status status;
1810 status = ice_flow_proc_seg_hdrs(params);
1814 status = ice_flow_create_xtrct_seq(hw, params);
1818 switch (params->blk) {
1821 status = ICE_SUCCESS;
1824 status = ice_flow_acl_def_entry_frmt(params);
1827 status = ice_flow_sel_acl_scen(hw, params);
1832 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001	/* also compare matched fields */
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002	/* require VSI association */
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004	/* ignore flow direction */
1843 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1844 * @hw: pointer to the HW struct
1845 * @blk: classification stage
1846 * @dir: flow direction
1847 * @segs: array of one or more packet segments that describe the flow
1848 * @segs_cnt: number of packet segments provided
1849 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1850 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1852 static struct ice_flow_prof *
1853 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1854 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1855 u8 segs_cnt, u16 vsi_handle, u32 conds)
1857 struct ice_flow_prof *p, *prof = NULL;
1859 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1860 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1861 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1862 segs_cnt && segs_cnt == p->segs_cnt) {
1865 /* Check for profile-VSI association if specified */
1866 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1867 ice_is_vsi_valid(hw, vsi_handle) &&
1868 !ice_is_bit_set(p->vsis, vsi_handle))
1871 /* Protocol headers must be checked. Matched fields are
1872 * checked if specified.
1874 for (i = 0; i < segs_cnt; i++)
1875 if (segs[i].hdrs != p->segs[i].hdrs ||
1876 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1877 segs[i].match != p->segs[i].match))
1880 /* A match is found if all segments are matched */
1881 if (i == segs_cnt) {
1886 ice_release_lock(&hw->fl_profs_locks[blk]);
1892 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1893 * @hw: pointer to the HW struct
1894 * @blk: classification stage
1895 * @dir: flow direction
1896 * @segs: array of one or more packet segments that describe the flow
1897 * @segs_cnt: number of packet segments provided
1900 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1901 struct ice_flow_seg_info *segs, u8 segs_cnt)
1903 struct ice_flow_prof *p;
1905 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1906 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1908 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1912 * ice_flow_find_prof_id - Look up a profile with given profile ID
1913 * @hw: pointer to the HW struct
1914 * @blk: classification stage
1915 * @prof_id: unique ID to identify this flow profile
1917 static struct ice_flow_prof *
1918 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1920 struct ice_flow_prof *p;
1922 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1923 if (p->id == prof_id)
1930 * ice_dealloc_flow_entry - Deallocate flow entry memory
1931 * @hw: pointer to the HW struct
1932 * @entry: flow entry to be removed
1935 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1941 ice_free(hw, entry->entry);
1943 if (entry->range_buf) {
1944 ice_free(hw, entry->range_buf);
1945 entry->range_buf = NULL;
1949 ice_free(hw, entry->acts);
1951 entry->acts_cnt = 0;
1954 ice_free(hw, entry);
1958 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1959 * @hw: pointer to the HW struct
1960 * @blk: classification stage
1961 * @prof_id: the profile ID handle
1962 * @hw_prof_id: pointer to variable to receive the HW profile ID
1965 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1968 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1969 struct ice_prof_map *map;
1971 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1972 map = ice_search_prof_id(hw, blk, prof_id);
1974 *hw_prof_id = map->prof_id;
1975 status = ICE_SUCCESS;
1977 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* Scenario number programmed into pf_scenario_num[] for a PF that has no
 * ACL scenario associated with the profile.
 */
#define ICE_ACL_INVALID_SCEN	0x3f
1984 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1985 * @hw: pointer to the hardware structure
1986 * @prof: pointer to flow profile
1987 * @buf: destination buffer function writes partial extraction sequence to
1989 * returns ICE_SUCCESS if no PF is associated to the given profile
1990 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1991 * returns other error code for real error
1993 static enum ice_status
1994 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1995 struct ice_aqc_acl_prof_generic_frmt *buf)
1997 enum ice_status status;
2000 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2004 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2008 /* If all PF's associated scenarios are all 0 or all
2009 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
2010 * not been configured yet.
2012 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2013 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2014 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2015 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2018 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2019 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2020 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2021 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2022 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2023 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2024 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2025 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
2028 return ICE_ERR_IN_USE;
2032 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
2033 * @hw: pointer to the hardware structure
2034 * @acts: array of actions to be performed on a match
2035 * @acts_cnt: number of actions
2037 static enum ice_status
2038 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
2043 for (i = 0; i < acts_cnt; i++) {
2044 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2045 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2046 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2047 struct ice_acl_cntrs cntrs = { 0 };
2048 enum ice_status status;
2050 /* amount is unused in the dealloc path but the common
2051 * parameter check routine wants a value set, as zero
2052 * is invalid for the check. Just set it.
2055 cntrs.bank = 0; /* Only bank0 for the moment */
2057 LE16_TO_CPU(acts[i].data.acl_act.value);
2059 LE16_TO_CPU(acts[i].data.acl_act.value);
2061 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2062 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2064 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2066 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
2075 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2076 * @hw: pointer to the hardware structure
2077 * @prof: pointer to flow profile
2079 * Disassociate the scenario from the profile for the PF of the VSI.
2081 static enum ice_status
2082 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2084 struct ice_aqc_acl_prof_generic_frmt buf;
2085 enum ice_status status = ICE_SUCCESS;
2088 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2090 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2094 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2098 /* Clear scenario for this PF */
2099 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
2100 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2106 * ice_flow_rem_entry_sync - Remove a flow entry
2107 * @hw: pointer to the HW struct
2108 * @blk: classification stage
2109 * @entry: flow entry to be removed
2111 static enum ice_status
2112 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2113 struct ice_flow_entry *entry)
2116 return ICE_ERR_BAD_PTR;
2118 if (blk == ICE_BLK_ACL) {
2119 enum ice_status status;
2122 return ICE_ERR_BAD_PTR;
2124 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2125 entry->scen_entry_idx);
2129 /* Checks if we need to release an ACL counter. */
2130 if (entry->acts_cnt && entry->acts)
2131 ice_flow_acl_free_act_cntr(hw, entry->acts,
2135 LIST_DEL(&entry->l_entry);
2137 ice_dealloc_flow_entry(hw, entry);
2143 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2144 * @hw: pointer to the HW struct
2145 * @blk: classification stage
2146 * @dir: flow direction
2147 * @prof_id: unique ID to identify this flow profile
2148 * @segs: array of one or more packet segments that describe the flow
2149 * @segs_cnt: number of packet segments provided
2150 * @acts: array of default actions
2151 * @acts_cnt: number of default actions
2152 * @prof: stores the returned flow profile added
2154 * Assumption: the caller has acquired the lock to the profile list
2156 static enum ice_status
2157 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2158 enum ice_flow_dir dir, u64 prof_id,
2159 struct ice_flow_seg_info *segs, u8 segs_cnt,
2160 struct ice_flow_action *acts, u8 acts_cnt,
2161 struct ice_flow_prof **prof)
2163 struct ice_flow_prof_params *params;
2164 enum ice_status status;
2167 if (!prof || (acts_cnt && !acts))
2168 return ICE_ERR_BAD_PTR;
2170 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2172 return ICE_ERR_NO_MEMORY;
2174 params->prof = (struct ice_flow_prof *)
2175 ice_malloc(hw, sizeof(*params->prof));
2176 if (!params->prof) {
2177 status = ICE_ERR_NO_MEMORY;
2181 /* initialize extraction sequence to all invalid (0xff) */
2182 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2183 params->es[i].prot_id = ICE_PROT_INVALID;
2184 params->es[i].off = ICE_FV_OFFSET_INVAL;
2188 params->prof->id = prof_id;
2189 params->prof->dir = dir;
2190 params->prof->segs_cnt = segs_cnt;
2192 /* Make a copy of the segments that need to be persistent in the flow
2195 for (i = 0; i < segs_cnt; i++)
2196 ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
2197 ICE_NONDMA_TO_NONDMA);
2199 /* Make a copy of the actions that need to be persistent in the flow
2203 params->prof->acts = (struct ice_flow_action *)
2204 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2205 ICE_NONDMA_TO_NONDMA);
2207 if (!params->prof->acts) {
2208 status = ICE_ERR_NO_MEMORY;
2213 status = ice_flow_proc_segs(hw, params);
2215 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2219 /* Add a HW profile for this flow profile */
2220 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2221 params->attr, params->attr_cnt, params->es,
2224 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2228 INIT_LIST_HEAD(¶ms->prof->entries);
2229 ice_init_lock(¶ms->prof->entries_lock);
2230 *prof = params->prof;
2234 if (params->prof->acts)
2235 ice_free(hw, params->prof->acts);
2236 ice_free(hw, params->prof);
2239 ice_free(hw, params);
2245 * ice_flow_rem_prof_sync - remove a flow profile
2246 * @hw: pointer to the hardware structure
2247 * @blk: classification stage
2248 * @prof: pointer to flow profile to remove
2250 * Assumption: the caller has acquired the lock to the profile list
2252 static enum ice_status
2253 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2254 struct ice_flow_prof *prof)
2256 enum ice_status status;
2258 /* Remove all remaining flow entries before removing the flow profile */
2259 if (!LIST_EMPTY(&prof->entries)) {
2260 struct ice_flow_entry *e, *t;
2262 ice_acquire_lock(&prof->entries_lock);
2264 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2266 status = ice_flow_rem_entry_sync(hw, blk, e);
2271 ice_release_lock(&prof->entries_lock);
2274 if (blk == ICE_BLK_ACL) {
2275 struct ice_aqc_acl_profile_ranges query_rng_buf;
2276 struct ice_aqc_acl_prof_generic_frmt buf;
2279 /* Disassociate the scenario from the profile for the PF */
2280 status = ice_flow_acl_disassoc_scen(hw, prof);
2284 /* Clear the range-checker if the profile ID is no longer
2287 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2288 if (status && status != ICE_ERR_IN_USE) {
2290 } else if (!status) {
2291 /* Clear the range-checker value for profile ID */
2292 ice_memset(&query_rng_buf, 0,
2293 sizeof(struct ice_aqc_acl_profile_ranges),
2296 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2301 status = ice_prog_acl_prof_ranges(hw, prof_id,
2302 &query_rng_buf, NULL);
2308 /* Remove all hardware profiles associated with this flow profile */
2309 status = ice_rem_prof(hw, blk, prof->id);
2311 LIST_DEL(&prof->l_entry);
2312 ice_destroy_lock(&prof->entries_lock);
2314 ice_free(hw, prof->acts);
2322 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2323 * @buf: Destination buffer function writes partial xtrct sequence to
2324 * @info: Info about field
2327 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2328 struct ice_flow_fld_info *info)
2333 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2334 info->xtrct.disp / BITS_PER_BYTE;
2335 dst = info->entry.val;
2336 for (i = 0; i < info->entry.last; i++)
2337 /* HW stores field vector words in LE, convert words back to BE
2338 * so constructed entries will end up in network order
2340 buf->byte_selection[dst++] = src++ ^ 1;
2344 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2345 * @hw: pointer to the hardware structure
2346 * @prof: pointer to flow profile
2348 static enum ice_status
2349 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2351 struct ice_aqc_acl_prof_generic_frmt buf;
2352 struct ice_flow_fld_info *info;
2353 enum ice_status status;
2357 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2359 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2363 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2364 if (status && status != ICE_ERR_IN_USE)
2368 /* Program the profile dependent configuration. This is done
2369 * only once regardless of the number of PFs using that profile
2371 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2373 for (i = 0; i < prof->segs_cnt; i++) {
2374 struct ice_flow_seg_info *seg = &prof->segs[i];
2377 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2378 ICE_FLOW_FIELD_IDX_MAX) {
2379 info = &seg->fields[j];
2381 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2382 buf.word_selection[info->entry.val] =
2385 ice_flow_acl_set_xtrct_seq_fld(&buf,
2389 for (j = 0; j < seg->raws_cnt; j++) {
2390 info = &seg->raws[j].info;
2391 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2395 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2396 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2400 /* Update the current PF */
2401 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2402 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2408 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2409 * @hw: pointer to the hardware structure
2410 * @blk: classification stage
2411 * @vsi_handle: software VSI handle
2412 * @vsig: target VSI group
2414 * Assumption: the caller has already verified that the VSI to
2415 * be added has the same characteristics as the VSIG and will
2416 * thereby have access to all resources added to that VSIG.
2419 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2422 enum ice_status status;
2424 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2425 return ICE_ERR_PARAM;
2427 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2428 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2430 ice_release_lock(&hw->fl_profs_locks[blk]);
2436 * ice_flow_assoc_prof - associate a VSI with a flow profile
2437 * @hw: pointer to the hardware structure
2438 * @blk: classification stage
2439 * @prof: pointer to flow profile
2440 * @vsi_handle: software VSI handle
2442 * Assumption: the caller has acquired the lock to the profile list
2443 * and the software VSI handle has been validated
2446 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2447 struct ice_flow_prof *prof, u16 vsi_handle)
2449 enum ice_status status = ICE_SUCCESS;
2451 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2452 if (blk == ICE_BLK_ACL) {
2453 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2457 status = ice_add_prof_id_flow(hw, blk,
2458 ice_get_hw_vsi_num(hw,
2462 ice_set_bit(vsi_handle, prof->vsis);
2464 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2472 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2473 * @hw: pointer to the hardware structure
2474 * @blk: classification stage
2475 * @prof: pointer to flow profile
2476 * @vsi_handle: software VSI handle
2478 * Assumption: the caller has acquired the lock to the profile list
2479 * and the software VSI handle has been validated
2481 static enum ice_status
2482 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2483 struct ice_flow_prof *prof, u16 vsi_handle)
2485 enum ice_status status = ICE_SUCCESS;
2487 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2488 status = ice_rem_prof_id_flow(hw, blk,
2489 ice_get_hw_vsi_num(hw,
2493 ice_clear_bit(vsi_handle, prof->vsis);
2495 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2503 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2504 * @hw: pointer to the HW struct
2505 * @blk: classification stage
2506 * @dir: flow direction
2507 * @prof_id: unique ID to identify this flow profile
2508 * @segs: array of one or more packet segments that describe the flow
2509 * @segs_cnt: number of packet segments provided
2510 * @acts: array of default actions
2511 * @acts_cnt: number of default actions
2512 * @prof: stores the returned flow profile added
2515 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2516 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2517 struct ice_flow_action *acts, u8 acts_cnt,
2518 struct ice_flow_prof **prof)
2520 enum ice_status status;
2522 if (segs_cnt > ICE_FLOW_SEG_MAX)
2523 return ICE_ERR_MAX_LIMIT;
2526 return ICE_ERR_PARAM;
2529 return ICE_ERR_BAD_PTR;
2531 status = ice_flow_val_hdrs(segs, segs_cnt);
2535 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2537 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2538 acts, acts_cnt, prof);
2540 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2542 ice_release_lock(&hw->fl_profs_locks[blk]);
2548 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2549 * @hw: pointer to the HW struct
2550 * @blk: the block for which the flow profile is to be removed
2551 * @prof_id: unique ID of the flow profile to be removed
2554 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2556 struct ice_flow_prof *prof;
2557 enum ice_status status;
2559 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2561 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2563 status = ICE_ERR_DOES_NOT_EXIST;
2567 /* prof becomes invalid after the call */
2568 status = ice_flow_rem_prof_sync(hw, blk, prof);
2571 ice_release_lock(&hw->fl_profs_locks[blk]);
2577 * ice_flow_find_entry - look for a flow entry using its unique ID
2578 * @hw: pointer to the HW struct
2579 * @blk: classification stage
2580 * @entry_id: unique ID to identify this flow entry
2582 * This function looks for the flow entry with the specified unique ID in all
2583 * flow profiles of the specified classification stage. If the entry is found,
2584 * and it returns the handle to the flow entry. Otherwise, it returns
2585 * ICE_FLOW_ENTRY_ID_INVAL.
2587 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2589 struct ice_flow_entry *found = NULL;
2590 struct ice_flow_prof *p;
2592 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2594 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2595 struct ice_flow_entry *e;
2597 ice_acquire_lock(&p->entries_lock);
2598 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2599 if (e->id == entry_id) {
2603 ice_release_lock(&p->entries_lock);
2609 ice_release_lock(&hw->fl_profs_locks[blk]);
2611 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2615 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2616 * @hw: pointer to the hardware structure
2617 * @acts: array of actions to be performed on a match
2618 * @acts_cnt: number of actions
2619 * @cnt_alloc: indicates if an ACL counter has been allocated.
2621 static enum ice_status
2622 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2623 u8 acts_cnt, bool *cnt_alloc)
2625 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2628 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2631 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2632 return ICE_ERR_OUT_OF_RANGE;
2634 for (i = 0; i < acts_cnt; i++) {
2635 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2636 acts[i].type != ICE_FLOW_ACT_DROP &&
2637 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2638 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2641 /* If the caller want to add two actions of the same type, then
2642 * it is considered invalid configuration.
2644 if (ice_test_and_set_bit(acts[i].type, dup_check))
2645 return ICE_ERR_PARAM;
2648 /* Checks if ACL counters are needed. */
2649 for (i = 0; i < acts_cnt; i++) {
2650 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2651 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2652 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2653 struct ice_acl_cntrs cntrs = { 0 };
2654 enum ice_status status;
2657 cntrs.bank = 0; /* Only bank0 for the moment */
2659 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2660 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2662 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2664 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2667 /* Counter index within the bank */
2668 acts[i].data.acl_act.value =
2669 CPU_TO_LE16(cntrs.first_cntr);
2678 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2679 * @fld: number of the given field
2680 * @info: info about field
2681 * @range_buf: range checker configuration buffer
2682 * @data: pointer to a data buffer containing flow entry's match values/masks
2683 * @range: Input/output param indicating which range checkers are being used
2686 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2687 struct ice_aqc_acl_profile_ranges *range_buf,
2688 u8 *data, u8 *range)
2692 /* If not specified, default mask is all bits in field */
2693 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2694 BIT(ice_flds_info[fld].size) - 1 :
2695 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2697 /* If the mask is 0, then we don't need to worry about this input
2698 * range checker value.
/* For a range field, src.last locates the upper bound and src.val the
 * lower bound in the caller's input buffer; both are shifted by the
 * extraction displacement to line up with the extracted word.
 * NOTE(review): values are read as unaligned u16 loads from a u8
 * buffer - assumes the host tolerates unaligned access; confirm.
 */
2702 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2704 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2705 u8 range_idx = info->entry.val;
/* Boundaries and mask are programmed big-endian for the hardware. */
2707 range_buf->checker_cfg[range_idx].low_boundary =
2708 CPU_TO_BE16(new_low);
2709 range_buf->checker_cfg[range_idx].high_boundary =
2710 CPU_TO_BE16(new_high);
2711 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2713 /* Indicate which range checker is being used */
2714 *range |= BIT(range_idx);
2719 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2720 * @fld: number of the given field
2721 * @info: info about the field
2722 * @buf: buffer containing the entry
2723 * @dontcare: buffer containing don't care mask for entry
2724 * @data: pointer to a data buffer containing flow entry's match values/masks
2727 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2728 u8 *dontcare, u8 *data)
2730 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2731 bool use_mask = false;
2734 src = info->src.val;
2735 mask = info->src.mask;
/* Destination index is relative to the scenario's byte-select window. */
2736 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* Bit displacement of the field within its first extracted byte. */
2737 disp = info->xtrct.disp % BITS_PER_BYTE;
2739 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte; tmp_s/tmp_m carry the bits shifted out
 * of the previous byte (by 'disp') into the next destination byte.
 */
2742 for (k = 0; k < info->entry.last; k++, dst++) {
2743 /* Add overflow bits from previous byte */
2744 buf[dst] = (tmp_s & 0xff00) >> 8;
2746 /* If mask is not valid, tmp_m is always zero, so just setting
2747 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2748 * overflow bits of mask from prev byte
2750 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2752 /* If there is displacement, last byte will only contain
2753 * displaced data, but there is no more data to read from user
2754 * buffer, so skip so as not to potentially read beyond end of
2757 if (!disp || k < info->entry.last - 1) {
2758 /* Store shifted data to use in next byte */
2759 tmp_s = data[src++] << disp;
2761 /* Add current (shifted) byte */
2762 buf[dst] |= tmp_s & 0xff;
2764 /* Handle mask if valid */
/* Mask semantics are inverted for the don't-care buffer: a set mask
 * bit means "match", so its complement marks the don't-care bits.
 */
2766 tmp_m = (~data[mask++] & 0xff) << disp;
2767 dontcare[dst] |= tmp_m & 0xff;
2772 /* Fill in don't care bits at beginning of field */
2774 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2775 for (k = 0; k < disp; k++)
2776 dontcare[dst] |= BIT(k);
/* Bit position just past the field's end within its last byte. */
2779 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2781 /* Fill in don't care bits at end of field */
2783 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2784 info->entry.last - 1;
2785 for (k = end_disp; k < BITS_PER_BYTE; k++)
2786 dontcare[dst] |= BIT(k);
2791 * ice_flow_acl_frmt_entry - Format ACL entry
2792 * @hw: pointer to the hardware structure
2793 * @prof: pointer to flow profile
2794 * @e: pointer to the flow entry
2795 * @data: pointer to a data buffer containing flow entry's match values/masks
2796 * @acts: array of actions to be performed on a match
2797 * @acts_cnt: number of actions
2799 * Formats the key (and key_inverse) to be matched from the data passed in,
2800 * along with data from the flow profile. This key/key_inverse pair makes up
2801 * the 'entry' for an ACL flow entry.
2803 static enum ice_status
2804 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2805 struct ice_flow_entry *e, u8 *data,
2806 struct ice_flow_action *acts, u8 acts_cnt)
2808 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2809 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2810 enum ice_status status;
/* Resolve the HW profile ID backing this SW flow profile. */
2815 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2819 /* Format the result action */
/* Validates the action list and allocates ACL counters if needed;
 * cnt_alloc records whether counters must be freed on error below.
 */
2821 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2825 status = ICE_ERR_NO_MEMORY;
/* Entry keeps its own copy of the action array (freed on error). */
2827 e->acts = (struct ice_flow_action *)
2828 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2829 ICE_NONDMA_TO_NONDMA);
2833 e->acts_cnt = acts_cnt;
2835 /* Format the matching data */
2836 buf_sz = prof->cfg.scen->width;
2837 buf = (u8 *)ice_malloc(hw, buf_sz);
2841 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2845 /* 'key' buffer will store both key and key_inverse, so must be twice
2848 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2852 range_buf = (struct ice_aqc_acl_profile_ranges *)
2853 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges))
2857 /* Set don't care mask to all 1's to start, will zero out used bytes */
2858 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM)
2860 for (i = 0; i < prof->segs_cnt; i++) {
2861 struct ice_flow_seg_info *seg = &prof->segs[i];
/* Format each matched field: range fields go to the range-checker
 * buffer, everything else into the key/don't-care buffers.
 */
2864 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2865 ICE_FLOW_FIELD_IDX_MAX) {
2866 struct ice_flow_fld_info *info = &seg->fields[j];
2868 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2869 ice_flow_acl_frmt_entry_range(j, info,
2873 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw fields are byte-aligned, so they copy straight through with no
 * bit-shifting (unlike ice_flow_acl_frmt_entry_fld).
 */
2877 for (j = 0; j < seg->raws_cnt; j++) {
2878 struct ice_flow_fld_info *info = &seg->raws[j].info;
2879 u16 dst, src, mask, k;
2880 bool use_mask = false;
2882 src = info->src.val;
2883 dst = info->entry.val -
2884 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2885 mask = info->src.mask;
2887 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2890 for (k = 0; k < info->entry.last; k++, dst++) {
2891 buf[dst] = data[src++];
2893 dontcare[dst] = ~data[mask++];
/* Profile ID byte must match exactly (don't-care cleared). */
2900 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2901 dontcare[prof->cfg.scen->pid_idx] = 0;
2903 /* Format the buffer for direction flags */
2904 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2906 if (prof->dir == ICE_FLOW_RX)
2907 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* 'range' is a bitmap of the range checkers used by this entry. */
2910 buf[prof->cfg.scen->rng_chk_idx] = range;
2911 /* Mark any unused range checkers as don't care */
2912 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
/* Ownership of range_buf transfers to the entry when used; otherwise
 * it is freed here.
 */
2913 e->range_buf = range_buf;
2915 ice_free(hw, range_buf);
/* Build the key/key-inverse pair the ACL hardware matches against. */
2918 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2924 e->entry_sz = buf_sz * 2;
2931 ice_free(hw, dontcare);
/* Error path: release everything attached to the entry so far. */
2936 if (status && range_buf) {
2937 ice_free(hw, range_buf);
2938 e->range_buf = NULL;
2941 if (status && e->acts) {
2942 ice_free(hw, e->acts);
2947 if (status && cnt_alloc)
2948 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2954 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2955 * the compared data.
2956 * @prof: pointer to flow profile
2957 * @e: pointer to the comparing flow entry
2958 * @do_chg_action: decide if we want to change the ACL action
2959 * @do_add_entry: decide if we want to add the new ACL entry
2960 * @do_rem_entry: decide if we want to remove the current ACL entry
2962 * Find an ACL scenario entry that matches the compared data. In the same time,
2963 * this function also figure out:
2964 * a/ If we want to change the ACL action
2965 * b/ If we want to add the new ACL entry
2966 * c/ If we want to remove the current ACL entry
2968 static struct ice_flow_entry *
2969 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2970 struct ice_flow_entry *e, bool *do_chg_action,
2971 bool *do_add_entry, bool *do_rem_entry)
2973 struct ice_flow_entry *p, *return_entry = NULL;
2977 * a/ There exists an entry with same matching data, but different
2978 * priority, then we remove this existing ACL entry. Then, we
2979 * will add the new entry to the ACL scenario.
2980 * b/ There exists an entry with same matching data, priority, and
2981 * result action, then we do nothing
2982 * c/ There exists an entry with same matching data, priority, but
2983 * different, action, then do only change the action's entry.
2984 * d/ Else, we add this new entry to the ACL scenario.
/* Defaults correspond to case d/ (no matching entry found). */
2986 *do_chg_action = false;
2987 *do_add_entry = true;
2988 *do_rem_entry = false;
2989 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2990 if (memcmp(p->entry, e->entry, p->entry_sz))
2993 /* From this point, we have the same matching_data. */
2994 *do_add_entry = false;
2997 if (p->priority != e->priority) {
2998 /* matching data && !priority */
2999 *do_add_entry = true;
3000 *do_rem_entry = true;
3004 /* From this point, we will have matching_data && priority */
3005 if (p->acts_cnt != e->acts_cnt)
3006 *do_chg_action = true;
/* O(n*m) pairwise comparison: an existing action that matches no new
 * action means the result action changed.
 */
3007 for (i = 0; i < p->acts_cnt; i++) {
3008 bool found_not_match = false;
3010 for (j = 0; j < e->acts_cnt; j++)
3011 if (memcmp(&p->acts[i], &e->acts[j],
3012 sizeof(struct ice_flow_action))) {
3013 found_not_match = true;
3017 if (found_not_match) {
3018 *do_chg_action = true;
3023 /* (do_chg_action = true) means :
3024 * matching_data && priority && !result_action
3025 * (do_chg_action = false) means :
3026 * matching_data && priority && result_action
3031 return return_entry;
3035 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
 * @p: flow-level priority to translate to the ACL entry priority space
 *
 * Unknown values fall back to ICE_ACL_PRIO_NORMAL (default case).
3038 static enum ice_acl_entry_prio
3039 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
3041 enum ice_acl_entry_prio acl_prio;
3044 case ICE_FLOW_PRIO_LOW:
3045 acl_prio = ICE_ACL_PRIO_LOW;
3047 case ICE_FLOW_PRIO_NORMAL:
3048 acl_prio = ICE_ACL_PRIO_NORMAL;
3050 case ICE_FLOW_PRIO_HIGH:
3051 acl_prio = ICE_ACL_PRIO_HIGH;
3054 acl_prio = ICE_ACL_PRIO_NORMAL;
3062 * ice_flow_acl_union_rng_chk - Perform union operation between two
3063 * range-range checker buffers
3064 * @dst_buf: pointer to destination range checker buffer
3065 * @src_buf: pointer to source range checker buffer
3067 * For this function, we do the union between dst_buf and src_buf
3068 * range checker buffer, and we will save the result back to dst_buf
 *
 * Returns ICE_ERR_BAD_PTR on NULL input, ICE_ERR_MAX_LIMIT when a source
 * checker cannot be merged because no destination slot is free.
3070 static enum ice_status
3071 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
3072 struct ice_aqc_acl_profile_ranges *src_buf)
3076 if (!dst_buf || !src_buf)
3077 return ICE_ERR_BAD_PTR;
3079 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
3080 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
3081 bool will_populate = false;
3083 in_data = &src_buf->checker_cfg[i];
/* A slot is usable if it is empty (mask == 0) or already holds an
 * identical checker configuration (de-duplication).
 */
3088 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
3089 cfg_data = &dst_buf->checker_cfg[j];
3091 if (!cfg_data->mask ||
3092 !memcmp(cfg_data, in_data,
3093 sizeof(struct ice_acl_rng_data))) {
3094 will_populate = true;
3099 if (will_populate) {
3100 ice_memcpy(cfg_data, in_data,
3101 sizeof(struct ice_acl_rng_data),
3102 ICE_NONDMA_TO_NONDMA);
3104 /* No available slot left to program range checker */
3105 return ICE_ERR_MAX_LIMIT;
3113 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3114 * @hw: pointer to the hardware structure
3115 * @prof: pointer to flow profile
3116 * @entry: double pointer to the flow entry
3118 * For this function, we will look at the current added entries in the
3119 * corresponding ACL scenario. Then, we will perform matching logic to
3120 * see if we want to add/modify/do nothing with this new entry.
 *
 * Caller must hold prof->entries_lock (see ice_flow_acl_add_scen_entry).
 * On "duplicate entry" outcomes, *entry may be redirected to the existing
 * entry and the caller-provided one deallocated.
3122 static enum ice_status
3123 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
3124 struct ice_flow_entry **entry)
3126 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
3127 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
3128 struct ice_acl_act_entry *acts = NULL;
3129 struct ice_flow_entry *exist;
3130 enum ice_status status = ICE_SUCCESS;
3131 struct ice_flow_entry *e;
3134 if (!entry || !(*entry) || !prof)
3135 return ICE_ERR_BAD_PTR;
3139 do_chg_rng_chk = false;
3143 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
3148 /* Query the current range-checker value in FW */
3149 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
3153 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
3154 sizeof(struct ice_aqc_acl_profile_ranges),
3155 ICE_NONDMA_TO_NONDMA);
3157 /* Generate the new range-checker value */
3158 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
3162 /* Reconfigure the range check if the buffer is changed. */
3163 do_chg_rng_chk = false;
3164 if (memcmp(&query_rng_buf, &cfg_rng_buf,
3165 sizeof(struct ice_aqc_acl_profile_ranges))) {
3166 status = ice_prog_acl_prof_ranges(hw, prof_id,
3167 &cfg_rng_buf, NULL);
3171 do_chg_rng_chk = true;
3175 /* Figure out if we want to (change the ACL action) and/or
3176 * (Add the new ACL entry) and/or (Remove the current ACL entry)
3178 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3179 &do_add_entry, &do_rem_entry);
3181 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3186 /* Prepare the result action buffer */
/* NOTE(review): the element count passed to ice_calloc is e->entry_sz
 * rather than e->acts_cnt - looks like an over-allocation; the copy
 * loop below only fills e->acts_cnt elements. Confirm against the
 * upstream base-code before changing.
 */
3187 acts = (struct ice_acl_act_entry *)
3188 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3190 return ICE_ERR_NO_MEMORY;
3192 for (i = 0; i < e->acts_cnt; i++)
3193 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3194 sizeof(struct ice_acl_act_entry),
3195 ICE_NONDMA_TO_NONDMA);
3198 enum ice_acl_entry_prio prio;
/* The formatted entry buffer holds key then key-inverse, each
 * entry_sz / 2 bytes (see ice_flow_acl_frmt_entry).
 */
3202 keys = (u8 *)e->entry;
3203 inverts = keys + (e->entry_sz / 2);
3204 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3206 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3207 inverts, acts, e->acts_cnt,
3212 e->scen_entry_idx = entry_idx;
3213 LIST_ADD(&e->l_entry, &prof->entries);
3215 if (do_chg_action) {
3216 /* For the action memory info, update the SW's copy of
3217 * exist entry with e's action memory info
3219 ice_free(hw, exist->acts);
3220 exist->acts_cnt = e->acts_cnt;
3221 exist->acts = (struct ice_flow_action *)
3222 ice_calloc(hw, exist->acts_cnt,
3223 sizeof(struct ice_flow_action));
3225 status = ICE_ERR_NO_MEMORY;
3229 ice_memcpy(exist->acts, e->acts,
3230 sizeof(struct ice_flow_action) * e->acts_cnt,
3231 ICE_NONDMA_TO_NONDMA);
/* Re-program the HW action for the already-placed entry. */
3233 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3235 exist->scen_entry_idx);
3240 if (do_chg_rng_chk) {
3241 /* In this case, we want to update the range checker
3242 * information of the exist entry
3244 status = ice_flow_acl_union_rng_chk(exist->range_buf,
3250 /* As we don't add the new entry to our SW DB, deallocate its
3251 * memories, and return the exist entry to the caller
3253 ice_dealloc_flow_entry(hw, e);
3263 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3264 * @hw: pointer to the hardware structure
3265 * @prof: pointer to flow profile
3266 * @e: double pointer to the flow entry
 *
 * Locking wrapper: serializes scenario-entry insertion on the profile's
 * entries_lock around ice_flow_acl_add_scen_entry_sync().
3268 static enum ice_status
3269 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3270 struct ice_flow_entry **e)
3272 enum ice_status status;
3274 ice_acquire_lock(&prof->entries_lock);
3275 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3276 ice_release_lock(&prof->entries_lock);
3282 * ice_flow_add_entry - Add a flow entry
3283 * @hw: pointer to the HW struct
3284 * @blk: classification stage
3285 * @prof_id: ID of the profile to add a new flow entry to
3286 * @entry_id: unique ID to identify this flow entry
3287 * @vsi_handle: software VSI handle for the flow entry
3288 * @prio: priority of the flow entry
3289 * @data: pointer to a data buffer containing flow entry's match values/masks
3290 * @acts: arrays of actions to be performed on a match
3291 * @acts_cnt: number of actions
3292 * @entry_h: pointer to buffer that receives the new flow entry's handle
3295 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3296 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3297 void *data, struct ice_flow_action *acts, u8 acts_cnt,
3300 struct ice_flow_entry *e = NULL;
3301 struct ice_flow_prof *prof;
3302 enum ice_status status = ICE_SUCCESS;
3304 /* ACL entries must indicate an action */
3305 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3306 return ICE_ERR_PARAM;
3308 /* No flow entry data is expected for RSS */
3309 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3310 return ICE_ERR_BAD_PTR;
3312 if (!ice_is_vsi_valid(hw, vsi_handle))
3313 return ICE_ERR_PARAM;
3315 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Profile must already exist for the given block/ID. */
3317 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3319 status = ICE_ERR_DOES_NOT_EXIST;
3321 /* Allocate memory for the entry being added and associate
3322 * the VSI to the found flow profile
3324 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3326 status = ICE_ERR_NO_MEMORY;
3328 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3331 ice_release_lock(&hw->fl_profs_locks[blk]);
3336 e->vsi_handle = vsi_handle;
3345 /* ACL will handle the entry management */
3346 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3351 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Blocks other than ACL (and whatever falls through above) are
 * not supported for entry addition here.
 */
3357 status = ICE_ERR_NOT_IMPL;
3361 if (blk != ICE_BLK_ACL) {
3362 /* ACL will handle the entry management */
3363 ice_acquire_lock(&prof->entries_lock);
3364 LIST_ADD(&e->l_entry, &prof->entries);
3365 ice_release_lock(&prof->entries_lock);
/* Hand the opaque entry handle back to the caller. */
3368 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3373 ice_free(hw, e->entry);
3381 * ice_flow_rem_entry - Remove a flow entry
3382 * @hw: pointer to the HW struct
3383 * @blk: classification stage
3384 * @entry_h: handle to the flow entry to be removed
3386 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3389 struct ice_flow_entry *entry;
3390 struct ice_flow_prof *prof;
3391 enum ice_status status = ICE_SUCCESS;
3393 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3394 return ICE_ERR_PARAM;
/* Handle is an encoded pointer; decode it back to the entry. */
3396 entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3398 /* Retain the pointer to the flow profile as the entry will be freed */
3402 ice_acquire_lock(&prof->entries_lock);
3403 status = ice_flow_rem_entry_sync(hw, blk, entry);
3404 ice_release_lock(&prof->entries_lock);
3411 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3412 * @seg: packet segment the field being set belongs to
3413 * @fld: field to be set
3414 * @field_type: type of the field
3415 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3416 * entry's input buffer
3417 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3419 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3420 * entry's input buffer
3422 * This helper function stores information of a field being matched, including
3423 * the type of the field and the locations of the value to match, the mask, and
3424 * the upper-bound value in the start of the input buffer for a flow entry.
3425 * This function should only be used for fixed-size data structures.
3427 * This function also opportunistically determines the protocol headers to be
3428 * present based on the fields being set. Some fields cannot be used alone to
3429 * determine the protocol headers present. Sometimes, fields for particular
3430 * protocol headers are not matched. In those cases, the protocol headers
3431 * must be explicitly set.
3434 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3435 enum ice_flow_fld_match_type field_type, u16 val_loc,
3436 u16 mask_loc, u16 last_loc)
3438 u64 bit = BIT_ULL(fld);
3441 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3444 seg->fields[fld].type = field_type;
3445 seg->fields[fld].src.val = val_loc;
3446 seg->fields[fld].src.mask = mask_loc;
3447 seg->fields[fld].src.last = last_loc;
/* Record the protocol header this field implies on the segment. */
3449 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3453 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3454 * @seg: packet segment the field being set belongs to
3455 * @fld: field to be set
3456 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3457 * entry's input buffer
3458 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3460 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3461 * entry's input buffer
3462 * @range: indicate if field being matched is to be in a range
3464 * This function specifies the locations, in the form of byte offsets from the
3465 * start of the input buffer for a flow entry, from where the value to match,
3466 * the mask value, and upper value can be extracted. These locations are then
3467 * stored in the flow profile. When adding a flow entry associated with the
3468 * flow profile, these locations will be used to quickly extract the values and
3469 * create the content of a match entry. This function should only be used for
3470 * fixed-size data structures.
3473 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3474 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Thin wrapper: translate the boolean into a match type and delegate. */
3476 enum ice_flow_fld_match_type t = range ?
3477 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3479 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3483 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3484 * @seg: packet segment the field being set belongs to
3485 * @fld: field to be set
3486 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3487 * entry's input buffer
3488 * @pref_loc: location of prefix value from entry's input buffer
3489 * @pref_sz: size of the location holding the prefix value
3491 * This function specifies the locations, in the form of byte offsets from the
3492 * start of the input buffer for a flow entry, from where the value to match
3493 * and the IPv4 prefix value can be extracted. These locations are then stored
3494 * in the flow profile. When adding flow entries to the associated flow profile,
3495 * these locations can be used to quickly extract the values to create the
3496 * content of a match entry. This function should only be used for fixed-size
3500 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3501 u16 val_loc, u16 pref_loc, u8 pref_sz)
3503 /* For this type of field, the "mask" location is for the prefix value's
3504 * location and the "last" location is for the size of the location of
3507 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3508 pref_loc, (u16)pref_sz);
3512 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3513 * @seg: packet segment the field being set belongs to
3514 * @off: offset of the raw field from the beginning of the segment in bytes
3515 * @len: length of the raw pattern to be matched
3516 * @val_loc: location of the value to match from entry's input buffer
3517 * @mask_loc: location of mask value from entry's input buffer
3519 * This function specifies the offset of the raw field to be match from the
3520 * beginning of the specified packet segment, and the locations, in the form of
3521 * byte offsets from the start of the input buffer for a flow entry, from where
3522 * the value to match and the mask value to be extracted. These locations are
3523 * then stored in the flow profile. When adding flow entries to the associated
3524 * flow profile, these locations can be used to quickly extract the values to
3525 * create the content of a match entry. This function should only be used for
3526 * fixed-size data structures.
3529 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3530 u16 val_loc, u16 mask_loc)
/* Only record the raw field if the per-segment table has room. */
3532 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3533 seg->raws[seg->raws_cnt].off = off;
3534 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3535 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3536 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3537 /* The "last" field is used to store the length of the field */
3538 seg->raws[seg->raws_cnt].info.src.last = len;
3541 /* Overflows of "raws" will be handled as an error condition later in
3542 * the flow when this information is processed.
3548 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3549 * @hw: pointer to the hardware structure
3550 * @blk: classification stage
3551 * @vsi_handle: software VSI handle
3552 * @prof_id: unique ID to identify this flow profile
3554 * This function removes the flow entries associated to the input
3555 * vsi handle and disassociates the vsi from the flow profile.
3557 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3560 struct ice_flow_prof *prof = NULL;
3561 enum ice_status status = ICE_SUCCESS;
3563 if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3564 return ICE_ERR_PARAM;
3566 /* find flow profile pointer with input package block and profile id */
/* NOTE(review): the lookup hard-codes ICE_BLK_FD while the rest of the
 * function uses the caller's 'blk' - presumably only called for the FD
 * block; confirm against callers before generalizing.
 */
3567 prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3569 ice_debug(hw, ICE_DBG_PKG,
3570 "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3571 return ICE_ERR_DOES_NOT_EXIST;
3574 /* Remove all remaining flow entries before removing the flow profile */
3575 if (!LIST_EMPTY(&prof->entries)) {
3576 struct ice_flow_entry *e, *t;
3578 ice_acquire_lock(&prof->entries_lock);
3579 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
/* Only entries belonging to the target VSI are removed. */
3581 if (e->vsi_handle != vsi_handle)
3584 status = ice_flow_rem_entry_sync(hw, blk, e);
3588 ice_release_lock(&prof->entries_lock);
3593 /* disassociate the flow profile from sw vsi handle */
3594 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3596 ice_debug(hw, ICE_DBG_PKG,
3597 "ice_flow_disassoc_prof() failed with status=%d\n",
/* Sets of segment-header flags that are valid in an RSS configuration,
 * grouped by protocol layer; VAL_MASKS is the union of all three.
 */
3602 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3603 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3605 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3606 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3608 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3609 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3611 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3612 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3613 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3614 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3617 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3618 * @segs: pointer to the flow field segment(s)
3619 * @seg_cnt: segment count
3620 * @cfg: configure parameters
3622 * Helper function to extract fields from hash bitmap and use flow
3623 * header value to set flow field segment for further use in flow
3624 * profile entry or removal.
3626 static enum ice_status
3627 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3628 const struct ice_rss_hash_cfg *cfg)
3630 struct ice_flow_seg_info *seg;
3634 /* set inner most segment */
3635 seg = &segs[seg_cnt - 1];
/* Each bit set in hash_flds names a flow field to hash on; only the
 * field identity matters for RSS, so all offsets are INVAL.
 */
3637 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3638 ICE_FLOW_FIELD_IDX_MAX)
3639 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3640 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3641 ICE_FLOW_FLD_OFF_INVAL, false);
3643 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3645 /* set outer most header */
3646 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3647 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3648 ICE_FLOW_SEG_HDR_IPV_FRAG |
3649 ICE_FLOW_SEG_HDR_IPV_OTHER;
3650 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3651 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3652 ICE_FLOW_SEG_HDR_IPV_FRAG |
3653 ICE_FLOW_SEG_HDR_IPV_OTHER;
3654 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
3655 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3656 ICE_FLOW_SEG_HDR_GRE |
3657 ICE_FLOW_SEG_HDR_IPV_OTHER;
3658 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
3659 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3660 ICE_FLOW_SEG_HDR_GRE |
3661 ICE_FLOW_SEG_HDR_IPV_OTHER;
/* Reject header flags outside the RSS-supported set. */
3663 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3664 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3665 ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3666 return ICE_ERR_PARAM;
/* At most one L3 and one L4 header may be selected (power-of-two
 * check on the masked bits).
 */
3668 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3669 if (val && !ice_is_pow2(val))
3672 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3673 if (val && !ice_is_pow2(val))
3680 * ice_rem_vsi_rss_list - remove VSI from RSS list
3681 * @hw: pointer to the hardware structure
3682 * @vsi_handle: software VSI handle
3684 * Remove the VSI from all RSS configurations in the list.
3686 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3688 struct ice_rss_cfg *r, *tmp;
3690 if (LIST_EMPTY(&hw->rss_list_head))
3693 ice_acquire_lock(&hw->rss_locks);
3694 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3695 ice_rss_cfg, l_entry)
/* Drop the whole config once no VSI references it any more. */
3696 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3697 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3698 LIST_DEL(&r->l_entry);
3701 ice_release_lock(&hw->rss_locks);
3705 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3706 * @hw: pointer to the hardware structure
3707 * @vsi_handle: software VSI handle
3709 * This function will iterate through all flow profiles and disassociate
3710 * the VSI from that profile. If the flow profile has no VSIs it will
3713 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3715 const enum ice_block blk = ICE_BLK_RSS;
3716 struct ice_flow_prof *p, *t;
3717 enum ice_status status = ICE_SUCCESS;
3719 if (!ice_is_vsi_valid(hw, vsi_handle))
3720 return ICE_ERR_PARAM;
3722 if (LIST_EMPTY(&hw->fl_profs[blk]))
3725 ice_acquire_lock(&hw->rss_locks);
3726 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3728 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3729 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Remove the profile outright once its VSI bitmap is empty. */
3733 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3734 status = ice_flow_rem_prof(hw, blk, p->id);
3739 ice_release_lock(&hw->rss_locks);
3745 * ice_get_rss_hdr_type - get a RSS profile's header type
3746 * @prof: RSS flow profile
 *
 * Derives the hdr_type from the segment count and, for two-segment
 * profiles, from the outer segment's header flags.
3748 static enum ice_rss_cfg_hdr_type
3749 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3751 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3753 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3754 hdr_type = ICE_RSS_OUTER_HEADERS;
3755 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3756 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3757 hdr_type = ICE_RSS_INNER_HEADERS;
/* Later checks win: an outer IPv6 flag overrides the IPv4 result. */
3758 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3759 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3760 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3761 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3768 * ice_rem_rss_list - remove RSS configuration from list
3769 * @hw: pointer to the hardware structure
3770 * @vsi_handle: software VSI handle
3771 * @prof: pointer to flow profile
3773 * Assumption: lock has already been acquired for RSS list
3776 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3778 enum ice_rss_cfg_hdr_type hdr_type;
3779 struct ice_rss_cfg *r, *tmp;
3781 /* Search for RSS hash fields associated to the VSI that match the
3782 * hash configurations associated to the flow profile. If found
3783 * remove from the RSS entry list of the VSI context and delete entry.
3785 hdr_type = ice_get_rss_hdr_type(prof);
3786 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3787 ice_rss_cfg, l_entry)
/* Match on the profile's innermost segment plus derived hdr_type. */
3788 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3789 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3790 r->hash.hdr_type == hdr_type) {
3791 ice_clear_bit(vsi_handle, r->vsis);
3792 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3793 LIST_DEL(&r->l_entry);
3801 * ice_add_rss_list - add RSS configuration to list
3802 * @hw: pointer to the hardware structure
3803 * @vsi_handle: software VSI handle
3804 * @prof: pointer to flow profile
3806 * Assumption: lock has already been acquired for RSS list
3808 static enum ice_status
3809 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3811 enum ice_rss_cfg_hdr_type hdr_type;
3812 struct ice_rss_cfg *r, *rss_cfg;
3814 hdr_type = ice_get_rss_hdr_type(prof);
/* If an identical hash config is already listed, just tag this VSI
 * onto it instead of creating a duplicate node.
 */
3815 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3816 ice_rss_cfg, l_entry)
3817 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3818 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3819 r->hash.hdr_type == hdr_type) {
3820 ice_set_bit(vsi_handle, r->vsis);
3824 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3826 return ICE_ERR_NO_MEMORY;
3828 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3829 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3830 rss_cfg->hash.hdr_type = hdr_type;
3831 rss_cfg->hash.symm = prof->cfg.symm;
3832 ice_set_bit(vsi_handle, rss_cfg->vsis);
3834 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit-field layout used to pack an RSS flow profile ID into a u64;
 * see the format comment below for the field meanings.
 */
3839 #define ICE_FLOW_PROF_HASH_S 0
3840 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3841 #define ICE_FLOW_PROF_HDR_S 32
3842 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3843 #define ICE_FLOW_PROF_ENCAP_S 62
3844 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3846 /* Flow profile ID format:
3847 * [0:31] - Packet match fields
3848 * [32:61] - Protocol header
3849 * [62:63] - Encapsulation flag:
3852 * 2 for tunneled with outer ipv4
3853 * 3 for tunneled with outer ipv6
3855 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3856 ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3857 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3858 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
/**
 * ice_rss_config_xor_word - program one GLQF_HSYMM byte for symmetric hash
 * @hw: pointer to the hardware structure
 * @prof_id: hardware profile ID in the RSS block
 * @src: field-vector word index being configured
 * @dst: field-vector word index to pair @src with
 *
 * Read-modify-writes the single byte of the GLQF_HSYMM register array that
 * corresponds to word @src, storing @dst with bit 0x80 set (0x80 presumably
 * marks the entry as valid/enabled per the HW register spec - confirm against
 * the datasheet).
 */
ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
	u32 s = ((src % 4) << 3); /* byte shift: 4 entries per 32-bit reg */
	u32 v = dst | 0x80; /* value to program: dst index + enable bit */
	u8 i = src / 4; /* register index within GLQF_HSYMM array */

	/* update only the byte describing word "src", leave others intact */
	reg = rd32(hw, GLQF_HSYMM(prof_id, i));
	reg = (reg & ~(0xff << s)) | (v << s);
	wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/**
 * ice_rss_config_xor - pair two field-vector ranges for symmetric hashing
 * @hw: pointer to the hardware structure
 * @prof_id: hardware profile ID in the RSS block
 * @src: first field-vector word of the source field
 * @dst: first field-vector word of the destination field
 * @len: number of field-vector words in each field
 *
 * Programs @len word pairs in both directions (src->dst and dst->src) so a
 * packet with swapped source/destination fields hashes to the same value.
 * Word indices are mirrored against the last field-vector word because the
 * HSYMM register ordering is reversed relative to the field vector.
 */
ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
		ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;

	for (i = 0; i < len; i++) {
		ice_rss_config_xor_word(hw, prof_id,
					/* Yes, field vector in GLQF_HSYMM and
					 * GLQF_HINSET is inversed!
					 */
					fv_last_word - (src + i),
					fv_last_word - (dst + i));
		/* and the mirror entry: dst paired back with src */
		ice_rss_config_xor_word(hw, prof_id,
					fv_last_word - (dst + i),
					fv_last_word - (src + i));
/**
 * ice_rss_update_symm - apply a profile's symmetric-hash setting to HW
 * @hw: pointer to the hardware structure
 * @prof: flow profile whose symmetric configuration should be programmed
 *
 * Resets the GLQF_HSYMM registers of the HW profile backing @prof, then, if
 * the profile requests symmetric hashing, pairs the source/destination IPv4,
 * IPv6, TCP, UDP and SCTP fields extracted for the profile's last segment.
 */
ice_rss_update_symm(struct ice_hw *hw,
		    struct ice_flow_prof *prof)
	struct ice_prof_map *map;

	/* resolve the HW profile ID under the profile-map lock */
	ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
		prof_id = map->prof_id;
	ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);

	/* clear to default */
	for (m = 0; m < 6; m++)
		wr32(hw, GLQF_HSYMM(prof_id, m), 0);
	if (prof->cfg.symm) {
		/* extraction info lives in the last (innermost) segment */
		struct ice_flow_seg_info *seg =
			&prof->segs[prof->segs_cnt - 1];

		struct ice_flow_seg_xtrct *ipv4_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv4_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;

		struct ice_flow_seg_xtrct *tcp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *tcp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *udp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *udp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *sctp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *sctp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;

		/* pair fields only when both sides were extracted
		 * (prot_id == 0 appears to mean "not extracted" - confirm)
		 */
		/* xor IPv4 SA/DA: 2 field-vector words each */
		if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv4_src->idx, ipv4_dst->idx, 2);

		/* xor IPv6 SA/DA: 8 field-vector words each */
		if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv6_src->idx, ipv6_dst->idx, 8);

		/* xor TCP src/dst port: 1 word each */
		if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   tcp_src->idx, tcp_dst->idx, 1);

		/* xor UDP src/dst port: 1 word each */
		if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   udp_src->idx, udp_dst->idx, 1);

		/* xor SCTP src/dst port: 1 word each */
		if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   sctp_src->idx, sctp_dst->idx, 1);
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * Builds packet-segment info from the hashed fields, then either reuses an
 * existing flow profile (matching headers/fields) or creates a new one, and
 * associates the VSI plus the RSS list entry with it.
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	/* one segment for outer-only, max segments when inner headers are
	 * involved
	 */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
		if (prof->cfg.symm == cfg->symm)
		prof->cfg.symm = cfg->symm;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
			ice_rem_rss_list(hw, vsi_handle, prof);

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
		if (prof->cfg.symm == cfg->symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
				status = ice_add_rss_list(hw, vsi_handle,
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(cfg->hash_flds,
						       segs[segs_cnt - 1].hdrs,
				   segs, segs_cnt, NULL, 0, &prof);

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed since it is not being used.
	 */
		ice_flow_rem_prof(hw, blk, prof->id);

	status = ice_add_rss_list(hw, vsi_handle, prof);

	/* record the symmetric setting and push it to hardware */
	prof->cfg.symm = cfg->symm;

	ice_rss_update_symm(hw, prof);
/**
 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * This function will generate a flow profile based on fields associated with
 * the input fields to hash on, the flow type and use the VSI number to add
 * a flow entry to the profile.
 *
 * Public wrapper: validates parameters, takes the RSS lock, and dispatches
 * to ice_add_rss_cfg_sync() - once for a specific header type, or twice
 * (outer then inner) when ICE_RSS_ANY_HEADERS is requested.
 */
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
		const struct ice_rss_hash_cfg *cfg)
	struct ice_rss_hash_cfg local_cfg;
	enum ice_status status;

	/* reject invalid VSI, null/out-of-range config, or empty hash set */
	if (!ice_is_vsi_valid(hw, vsi_handle) ||
	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
	    cfg->hash_flds == ICE_HASH_INVALID)
		return ICE_ERR_PARAM;

	/* specific header type: single sync call under the RSS lock */
	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
		ice_acquire_lock(&hw->rss_locks);
		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
		ice_release_lock(&hw->rss_locks);
		/* ICE_RSS_ANY_HEADERS: configure outer headers first, then
		 * inner headers if the outer call succeeded
		 */
		ice_acquire_lock(&hw->rss_locks);
		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
			local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
		ice_release_lock(&hw->rss_locks);
/**
 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * Finds the flow profile matching the requested headers and hashed fields,
 * disassociates the VSI from it, drops the VSI's RSS list entry, and removes
 * the profile entirely once no VSI references it.
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_seg_info *segs;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* one segment for outer-only, max segments otherwise */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);

	/* lookup must match headers AND hashed fields to remove the right
	 * profile
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
		status = ICE_ERR_DOES_NOT_EXIST;

	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);

	/* Remove RSS configuration from VSI context before deleting
	 * the flow profile.
	 */
	ice_rem_rss_list(hw, vsi_handle, prof);

	/* drop the profile once no VSI remains associated with it */
	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
		status = ice_flow_rem_prof(hw, blk, prof->id);
/**
 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * This function will lookup the flow profile based on the input
 * hash field bitmap, iterate through the profile entry list of
 * that profile and find entry associated with input VSI to be
 * removed. Calls are made to underlying flow apis which will in
 * turn build or update buffers for RSS XLT1 section.
 *
 * Public wrapper: validates parameters, takes the RSS lock, and dispatches
 * to ice_rem_rss_cfg_sync() - once for a specific header type, or twice
 * (outer then inner) when ICE_RSS_ANY_HEADERS is requested.
 */
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
		const struct ice_rss_hash_cfg *cfg)
	struct ice_rss_hash_cfg local_cfg;
	enum ice_status status;

	/* reject invalid VSI, null/out-of-range config, or empty hash set */
	if (!ice_is_vsi_valid(hw, vsi_handle) ||
	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
	    cfg->hash_flds == ICE_HASH_INVALID)
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	/* specific header type: single removal pass */
	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
		/* ICE_RSS_ANY_HEADERS: remove the outer-header config first,
		 * then the inner-header one
		 */
		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
		local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
		status = ice_rem_rss_cfg_sync(hw, vsi_handle,
	ice_release_lock(&hw->rss_locks);
/**
 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Re-applies every tracked RSS configuration whose VSI bitmap contains
 * @vsi_handle by calling ice_add_rss_cfg_sync() for each matching entry
 * (used to restore state, e.g. after a reset).
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
	enum ice_status status = ICE_SUCCESS;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	/* walk the saved list and re-add each config owned by this VSI */
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry) {
		if (ice_is_bit_set(r->vsis, vsi_handle)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
	ice_release_lock(&hw->rss_locks);
4254 * ice_get_rss_cfg - returns hashed fields for the given header types
4255 * @hw: pointer to the hardware structure
4256 * @vsi_handle: software VSI handle
4257 * @hdrs: protocol header type
4259 * This function will return the match fields of the first instance of flow
4260 * profile having the given header types and containing input VSI
4262 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4264 u64 rss_hash = ICE_HASH_INVALID;
4265 struct ice_rss_cfg *r;
4267 /* verify if the protocol header is non zero and VSI is valid */
4268 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4269 return ICE_HASH_INVALID;
4271 ice_acquire_lock(&hw->rss_locks);
4272 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4273 ice_rss_cfg, l_entry)
4274 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4275 r->hash.addl_hdrs == hdrs) {
4276 rss_hash = r->hash.hash_flds;
4279 ice_release_lock(&hw->rss_locks);