/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */
#include "ice_common.h"
/* Size of known protocol header fields, in bytes */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IPV4_ID		2
#define ICE_FLOW_FLD_SZ_IPV6_ID		4
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI	4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID	2
38 /* Describe properties of a protocol header field */
39 struct ice_flow_field_info {
40 enum ice_flow_seg_hdr hdr;
41 s16 off; /* Offset from start of a protocol header, in bits */
42 u16 size; /* Size of fields in bits */
43 u16 mask; /* 16-bit mask for field */
46 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
48 .off = (_offset_bytes) * BITS_PER_BYTE, \
49 .size = (_size_bytes) * BITS_PER_BYTE, \
53 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
55 .off = (_offset_bytes) * BITS_PER_BYTE, \
56 .size = (_size_bytes) * BITS_PER_BYTE, \
60 /* Table containing properties of supported protocol header fields */
62 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
64 /* ICE_FLOW_FIELD_IDX_ETH_DA */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
66 /* ICE_FLOW_FIELD_IDX_ETH_SA */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
68 /* ICE_FLOW_FIELD_IDX_S_VLAN */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
70 /* ICE_FLOW_FIELD_IDX_C_VLAN */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
72 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
73 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
75 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
76 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
78 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
79 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
81 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
82 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
83 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
84 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
85 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
86 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
87 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
88 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
89 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
90 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
91 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
92 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
93 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
101 /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
103 ICE_FLOW_FLD_SZ_IPV4_ID),
104 /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
105 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
106 ICE_FLOW_FLD_SZ_IPV6_ID),
107 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
109 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
110 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
112 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
113 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
116 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
119 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
120 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
122 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
126 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
130 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
131 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
132 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
134 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
135 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
136 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
138 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
139 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
141 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
143 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
144 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
145 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
146 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
147 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
148 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
149 /* ICE_FLOW_FIELD_IDX_ARP_OP */
150 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
152 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
154 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
155 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
157 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
158 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
160 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
162 ICE_FLOW_FLD_SZ_GTP_TEID),
163 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
164 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
165 ICE_FLOW_FLD_SZ_GTP_TEID),
166 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
167 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
168 ICE_FLOW_FLD_SZ_GTP_TEID),
169 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
170 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
171 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
172 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
173 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
174 ICE_FLOW_FLD_SZ_GTP_TEID),
175 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
176 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
177 ICE_FLOW_FLD_SZ_GTP_TEID),
179 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
181 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
183 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
184 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
185 ICE_FLOW_FLD_SZ_PFCP_SEID),
187 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
188 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
189 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
191 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
192 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
193 ICE_FLOW_FLD_SZ_ESP_SPI),
195 /* ICE_FLOW_FIELD_IDX_AH_SPI */
196 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
197 ICE_FLOW_FLD_SZ_AH_SPI),
199 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
200 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
201 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
202 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
203 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
204 ICE_FLOW_FLD_SZ_VXLAN_VNI),
206 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
207 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
208 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
210 /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
211 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
212 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
215 /* Bitmaps indicating relevant packet types for a particular protocol header
217 * Packet types for packets with an Outer/First/Single MAC header
219 static const u32 ice_ptypes_mac_ofos[] = {
220 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
221 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
222 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
223 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
224 0x00000000, 0x00000000, 0x00000000, 0x00000000,
225 0x00000000, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 /* Packet types for packets with an Innermost/Last MAC VLAN header */
231 static const u32 ice_ptypes_macvlan_il[] = {
232 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
233 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
234 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 0x00000000, 0x00000000, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
243 * does NOT include IPV4 other PTYPEs
245 static const u32 ice_ptypes_ipv4_ofos[] = {
246 0x1D800000, 0x24000800, 0x00000000, 0x00000000,
247 0x00000000, 0x00000155, 0x00000000, 0x00000000,
248 0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
249 0x00001500, 0x00000000, 0x00000000, 0x00000000,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
257 * includes IPV4 other PTYPEs
259 static const u32 ice_ptypes_ipv4_ofos_all[] = {
260 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
261 0x00000000, 0x00000155, 0x00000000, 0x00000000,
262 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
263 0x03FFD500, 0x00000000, 0x00000000, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 /* Packet types for packets with an Innermost/Last IPv4 header */
271 static const u32 ice_ptypes_ipv4_il[] = {
272 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
273 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
274 0x00000000, 0x00000000, 0x001FF800, 0x00100000,
275 0xFC0FC000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
283 * does NOT include IVP6 other PTYPEs
285 static const u32 ice_ptypes_ipv6_ofos[] = {
286 0x00000000, 0x00000000, 0x76000000, 0x10002000,
287 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
288 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
289 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
297 * includes IPV6 other PTYPEs
299 static const u32 ice_ptypes_ipv6_ofos_all[] = {
300 0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
301 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
302 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
303 0xFC002A00, 0x0000003F, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 /* Packet types for packets with an Innermost/Last IPv6 header */
311 static const u32 ice_ptypes_ipv6_il[] = {
312 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
313 0x00000770, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
315 0x03F00000, 0x0000003F, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 /* Packet types for packets with an Outer/First/Single
323 * non-frag IPv4 header - no L4
325 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
326 0x10800000, 0x04000800, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
329 0x00001500, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
337 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
338 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
339 0x00000008, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00139800, 0x00000000,
341 0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 /* Packet types for packets with an Outer/First/Single
349 * non-frag IPv6 header - no L4
351 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
352 0x00000000, 0x00000000, 0x42000000, 0x10002000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x02300000, 0x00000540, 0x00000000,
355 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
363 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
364 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
365 0x00000430, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
367 0x02300000, 0x00000023, 0x00000000, 0x00000000,
368 0x00000000, 0x00000000, 0x00000000, 0x00000000,
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 /* Packet types for packets with an Outermost/First ARP header */
375 static const u32 ice_ptypes_arp_of[] = {
376 0x00000800, 0x00000000, 0x00000000, 0x00000000,
377 0x00000000, 0x00000000, 0x00000000, 0x00000000,
378 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 0x00000000, 0x00000000, 0x00000000, 0x00000000,
380 0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000000, 0x00000000,
386 /* UDP Packet types for non-tunneled packets or tunneled
387 * packets with inner UDP.
389 static const u32 ice_ptypes_udp_il[] = {
390 0x81000000, 0x20204040, 0x04000010, 0x80810102,
391 0x00000040, 0x00000000, 0x00000000, 0x00000000,
392 0x00000000, 0x00410000, 0x908427E0, 0x00100007,
393 0x10410000, 0x00000004, 0x00000000, 0x00000000,
394 0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 0x00000000, 0x00000000, 0x00000000, 0x00000000,
400 /* Packet types for packets with an Innermost/Last TCP header */
401 static const u32 ice_ptypes_tcp_il[] = {
402 0x04000000, 0x80810102, 0x10000040, 0x02040408,
403 0x00000102, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x00820000, 0x21084000, 0x00000000,
405 0x20820000, 0x00000008, 0x00000000, 0x00000000,
406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 0x00000000, 0x00000000, 0x00000000, 0x00000000,
412 /* Packet types for packets with an Innermost/Last SCTP header */
413 static const u32 ice_ptypes_sctp_il[] = {
414 0x08000000, 0x01020204, 0x20000081, 0x04080810,
415 0x00000204, 0x00000000, 0x00000000, 0x00000000,
416 0x00000000, 0x01040000, 0x00000000, 0x00000000,
417 0x41040000, 0x00000010, 0x00000000, 0x00000000,
418 0x00000000, 0x00000000, 0x00000000, 0x00000000,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 0x00000000, 0x00000000, 0x00000000, 0x00000000,
424 /* Packet types for packets with an Outermost/First ICMP header */
425 static const u32 ice_ptypes_icmp_of[] = {
426 0x10000000, 0x00000000, 0x00000000, 0x00000000,
427 0x00000000, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
430 0x00000000, 0x00000000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
436 /* Packet types for packets with an Innermost/Last ICMP header */
437 static const u32 ice_ptypes_icmp_il[] = {
438 0x00000000, 0x02040408, 0x40000102, 0x08101020,
439 0x00000408, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x42108000, 0x00000000,
441 0x82080000, 0x00000020, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 /* Packet types for packets with an Outermost/First GRE header */
449 static const u32 ice_ptypes_gre_of[] = {
450 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
451 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 /* Packet types for packets with an Innermost/Last MAC header */
461 static const u32 ice_ptypes_mac_il[] = {
462 0x00000000, 0x20000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 /* Packet types for GTPC */
473 static const u32 ice_ptypes_gtpc[] = {
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 /* Packet types for VXLAN with VNI */
485 static const u32 ice_ptypes_vxlan_vni[] = {
486 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
487 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 /* Packet types for GTPC with TEID */
497 static const u32 ice_ptypes_gtpc_tid[] = {
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000060, 0x00000000,
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000000, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 /* Packet types for GTPU */
509 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
510 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
511 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
512 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
513 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
514 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
515 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
516 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
517 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
518 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
519 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
520 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
521 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
522 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
523 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
524 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
525 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
526 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
527 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
528 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
529 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
532 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
533 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
534 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
535 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
536 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
537 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
538 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
539 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
540 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
541 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
542 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
543 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
544 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
545 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
546 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
547 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
548 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
549 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
550 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
551 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
552 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
555 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
556 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
557 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
558 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
559 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
560 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
561 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
562 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
563 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
564 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
565 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
566 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
567 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
568 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
569 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
570 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
571 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
572 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
573 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
574 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
575 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
578 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
579 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
580 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
581 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
582 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
583 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
584 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
585 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
586 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
587 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
588 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
589 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
590 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
591 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
592 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
593 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
594 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
595 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
596 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
597 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
598 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
601 static const u32 ice_ptypes_gtpu[] = {
602 0x00000000, 0x00000000, 0x00000000, 0x00000000,
603 0x00000000, 0x00000000, 0x00000000, 0x00000000,
604 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
605 0x00000000, 0x00000000, 0x00000000, 0x00000000,
606 0x00000000, 0x00000000, 0x00000000, 0x00000000,
607 0x00000000, 0x00000000, 0x00000000, 0x00000000,
608 0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 0x00000000, 0x00000000, 0x00000000, 0x00000000,
612 /* Packet types for pppoe */
613 static const u32 ice_ptypes_pppoe[] = {
614 0x00000000, 0x00000000, 0x00000000, 0x00000000,
615 0x00000000, 0x00000000, 0x00000000, 0x00000000,
616 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
617 0x00000000, 0x00000000, 0x00000000, 0x00000000,
618 0x00000000, 0x00000000, 0x00000000, 0x00000000,
619 0x00000000, 0x00000000, 0x00000000, 0x00000000,
620 0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 0x00000000, 0x00000000, 0x00000000, 0x00000000,
624 /* Packet types for packets with PFCP NODE header */
625 static const u32 ice_ptypes_pfcp_node[] = {
626 0x00000000, 0x00000000, 0x00000000, 0x00000000,
627 0x00000000, 0x00000000, 0x00000000, 0x00000000,
628 0x00000000, 0x00000000, 0x80000000, 0x00000002,
629 0x00000000, 0x00000000, 0x00000000, 0x00000000,
630 0x00000000, 0x00000000, 0x00000000, 0x00000000,
631 0x00000000, 0x00000000, 0x00000000, 0x00000000,
632 0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 0x00000000, 0x00000000, 0x00000000, 0x00000000,
636 /* Packet types for packets with PFCP SESSION header */
637 static const u32 ice_ptypes_pfcp_session[] = {
638 0x00000000, 0x00000000, 0x00000000, 0x00000000,
639 0x00000000, 0x00000000, 0x00000000, 0x00000000,
640 0x00000000, 0x00000000, 0x00000000, 0x00000005,
641 0x00000000, 0x00000000, 0x00000000, 0x00000000,
642 0x00000000, 0x00000000, 0x00000000, 0x00000000,
643 0x00000000, 0x00000000, 0x00000000, 0x00000000,
644 0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 0x00000000, 0x00000000, 0x00000000, 0x00000000,
648 /* Packet types for l2tpv3 */
649 static const u32 ice_ptypes_l2tpv3[] = {
650 0x00000000, 0x00000000, 0x00000000, 0x00000000,
651 0x00000000, 0x00000000, 0x00000000, 0x00000000,
652 0x00000000, 0x00000000, 0x00000000, 0x00000300,
653 0x00000000, 0x00000000, 0x00000000, 0x00000000,
654 0x00000000, 0x00000000, 0x00000000, 0x00000000,
655 0x00000000, 0x00000000, 0x00000000, 0x00000000,
656 0x00000000, 0x00000000, 0x00000000, 0x00000000,
657 0x00000000, 0x00000000, 0x00000000, 0x00000000,
660 /* Packet types for esp */
661 static const u32 ice_ptypes_esp[] = {
662 0x00000000, 0x00000000, 0x00000000, 0x00000000,
663 0x00000000, 0x00000003, 0x00000000, 0x00000000,
664 0x00000000, 0x00000000, 0x00000000, 0x00000000,
665 0x00000000, 0x00000000, 0x00000000, 0x00000000,
666 0x00000000, 0x00000000, 0x00000000, 0x00000000,
667 0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 0x00000000, 0x00000000, 0x00000000, 0x00000000,
669 0x00000000, 0x00000000, 0x00000000, 0x00000000,
672 /* Packet types for ah */
673 static const u32 ice_ptypes_ah[] = {
674 0x00000000, 0x00000000, 0x00000000, 0x00000000,
675 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
676 0x00000000, 0x00000000, 0x00000000, 0x00000000,
677 0x00000000, 0x00000000, 0x00000000, 0x00000000,
678 0x00000000, 0x00000000, 0x00000000, 0x00000000,
679 0x00000000, 0x00000000, 0x00000000, 0x00000000,
680 0x00000000, 0x00000000, 0x00000000, 0x00000000,
681 0x00000000, 0x00000000, 0x00000000, 0x00000000,
684 /* Packet types for packets with NAT_T ESP header */
685 static const u32 ice_ptypes_nat_t_esp[] = {
686 0x00000000, 0x00000000, 0x00000000, 0x00000000,
687 0x00000000, 0x00000030, 0x00000000, 0x00000000,
688 0x00000000, 0x00000000, 0x00000000, 0x00000000,
689 0x00000000, 0x00000000, 0x00000000, 0x00000000,
690 0x00000000, 0x00000000, 0x00000000, 0x00000000,
691 0x00000000, 0x00000000, 0x00000000, 0x00000000,
692 0x00000000, 0x00000000, 0x00000000, 0x00000000,
693 0x00000000, 0x00000000, 0x00000000, 0x00000000,
696 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
697 0x00000846, 0x00000000, 0x00000000, 0x00000000,
698 0x00000000, 0x00000000, 0x00000000, 0x00000000,
699 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
700 0x00000000, 0x00000000, 0x00000000, 0x00000000,
701 0x00000000, 0x00000000, 0x00000000, 0x00000000,
702 0x00000000, 0x00000000, 0x00000000, 0x00000000,
703 0x00000000, 0x00000000, 0x00000000, 0x00000000,
704 0x00000000, 0x00000000, 0x00000000, 0x00000000,
707 static const u32 ice_ptypes_gtpu_no_ip[] = {
708 0x00000000, 0x00000000, 0x00000000, 0x00000000,
709 0x00000000, 0x00000000, 0x00000000, 0x00000000,
710 0x00000000, 0x00000000, 0x00000600, 0x00000000,
711 0x00000000, 0x00000000, 0x00000000, 0x00000000,
712 0x00000000, 0x00000000, 0x00000000, 0x00000000,
713 0x00000000, 0x00000000, 0x00000000, 0x00000000,
714 0x00000000, 0x00000000, 0x00000000, 0x00000000,
715 0x00000000, 0x00000000, 0x00000000, 0x00000000,
718 static const u32 ice_ptypes_ecpri_tp0[] = {
719 0x00000000, 0x00000000, 0x00000000, 0x00000000,
720 0x00000000, 0x00000000, 0x00000000, 0x00000000,
721 0x00000000, 0x00000000, 0x00000000, 0x00000400,
722 0x00000000, 0x00000000, 0x00000000, 0x00000000,
723 0x00000000, 0x00000000, 0x00000000, 0x00000000,
724 0x00000000, 0x00000000, 0x00000000, 0x00000000,
725 0x00000000, 0x00000000, 0x00000000, 0x00000000,
726 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Bitmap of PTYPEs for eCPRI carried over UDP (transport profile 0);
 * ANDed into the candidate set for ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0.
 */
729 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
730 0x00000000, 0x00000000, 0x00000000, 0x00000000,
731 0x00000000, 0x00000000, 0x00000000, 0x00000000,
732 0x00000000, 0x00000000, 0x00000000, 0x00100000,
733 0x00000000, 0x00000000, 0x00000000, 0x00000000,
734 0x00000000, 0x00000000, 0x00000000, 0x00000000,
735 0x00000000, 0x00000000, 0x00000000, 0x00000000,
736 0x00000000, 0x00000000, 0x00000000, 0x00000000,
737 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Bitmap of PTYPEs for L2TPv2 tunneled packets;
 * ANDed into the candidate set for ICE_FLOW_SEG_HDR_L2TPV2.
 */
740 static const u32 ice_ptypes_l2tpv2[] = {
741 0x00000000, 0x00000000, 0x00000000, 0x00000000,
742 0x00000000, 0x00000000, 0x00000000, 0x00000000,
743 0x00000000, 0x00000000, 0x00000000, 0x00000000,
744 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
745 0x00000000, 0x00000000, 0x00000000, 0x00000000,
746 0x00000000, 0x00000000, 0x00000000, 0x00000000,
747 0x00000000, 0x00000000, 0x00000000, 0x00000000,
748 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Bitmap of PTYPEs containing a PPP header; a strict subset of the L2TPv2
 * set above. ANDed into the candidate set for ICE_FLOW_SEG_HDR_PPP.
 */
751 static const u32 ice_ptypes_ppp[] = {
752 0x00000000, 0x00000000, 0x00000000, 0x00000000,
753 0x00000000, 0x00000000, 0x00000000, 0x00000000,
754 0x00000000, 0x00000000, 0x00000000, 0x00000000,
755 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
756 0x00000000, 0x00000000, 0x00000000, 0x00000000,
757 0x00000000, 0x00000000, 0x00000000, 0x00000000,
758 0x00000000, 0x00000000, 0x00000000, 0x00000000,
759 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Bitmap of PTYPEs for fragmented IPv4 packets; ANDed into the candidate
 * set when both ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_IPV_FRAG are set.
 */
762 static const u32 ice_ptypes_ipv4_frag[] = {
763 0x00400000, 0x00000000, 0x00000000, 0x00000000,
764 0x00000000, 0x00000000, 0x00000000, 0x00000000,
765 0x00000000, 0x00000000, 0x00000000, 0x00000000,
766 0x00000000, 0x00000000, 0x00000000, 0x00000000,
767 0x00000000, 0x00000000, 0x00000000, 0x00000000,
768 0x00000000, 0x00000000, 0x00000000, 0x00000000,
769 0x00000000, 0x00000000, 0x00000000, 0x00000000,
770 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Bitmap of PTYPEs for fragmented IPv6 packets; ANDed into the candidate
 * set when both ICE_FLOW_SEG_HDR_IPV6 and ICE_FLOW_SEG_HDR_IPV_FRAG are set.
 */
773 static const u32 ice_ptypes_ipv6_frag[] = {
774 0x00000000, 0x00000000, 0x01000000, 0x00000000,
775 0x00000000, 0x00000000, 0x00000000, 0x00000000,
776 0x00000000, 0x00000000, 0x00000000, 0x00000000,
777 0x00000000, 0x00000000, 0x00000000, 0x00000000,
778 0x00000000, 0x00000000, 0x00000000, 0x00000000,
779 0x00000000, 0x00000000, 0x00000000, 0x00000000,
780 0x00000000, 0x00000000, 0x00000000, 0x00000000,
781 0x00000000, 0x00000000, 0x00000000, 0x00000000,
784 /* Manage parameters and info. used during the creation of a flow profile */
785 struct ice_flow_prof_params {
787 u16 entry_length; /* # of bytes formatted entry will require */
/* Flow profile being built; its segments drive PTYPE/field processing */
789 struct ice_flow_prof *prof;
791 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
792 * This will give us the direction flags.
/* Extraction sequence entries (protocol ID + offset), one per FV word */
794 struct ice_fv_word es[ICE_MAX_FV_WORDS];
795 /* attributes can be used to add attributes to a particular PTYPE */
796 const struct ice_ptype_attributes *attr;
/* Per-extraction-entry match masks, parallel to es[] */
799 u16 mask[ICE_MAX_FV_WORDS];
/* Candidate packet types; narrowed by ANDing per-protocol bitmaps */
800 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Segment-header flags for tunnel/encapsulation protocols that, for RSS,
 * select hashing on the inner packet headers.
 */
803 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
804 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
805 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
806 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
807 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
808 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
809 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
810 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
/* All L2 (link layer) segment-header flags */
812 #define ICE_FLOW_SEG_HDRS_L2_MASK \
813 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 (network layer) segment-header flags; at most one may be set
 * per segment (enforced by ice_flow_val_hdrs)
 */
814 #define ICE_FLOW_SEG_HDRS_L3_MASK \
815 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
816 ICE_FLOW_SEG_HDR_ARP)
/* All L4 (transport layer) segment-header flags; at most one may be set
 * per segment (enforced by ice_flow_val_hdrs)
 */
817 #define ICE_FLOW_SEG_HDRS_L4_MASK \
818 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
819 ICE_FLOW_SEG_HDR_SCTP)
820 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
821 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
822 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
825 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
826 * @segs: array of one or more packet segments that describe the flow
827 * @segs_cnt: number of packet segments provided
829 static enum ice_status
830 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
834 for (i = 0; i < segs_cnt; i++) {
835 /* Multiple L3 headers */
/* A non-zero masked value that is not a power of two means more than
 * one L3 header bit is set in this segment - reject it.
 */
836 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
837 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
838 return ICE_ERR_PARAM;
840 /* Multiple L4 headers */
/* Same power-of-two check: at most one L4 header per segment */
841 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
842 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
843 return ICE_ERR_PARAM;
849 /* Sizes of fixed known protocol headers without header options */
/* All sizes are in bytes and assume no optional fields/extensions;
 * used by ice_flow_calc_seg_sz to bound raw-field offsets.
 */
850 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
851 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
852 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
853 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
854 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
855 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
856 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
857 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
858 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
861 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
862 * @params: information about the flow to be processed
863 * @seg: index of packet segment whose header size is to be determined
/* Returns the cumulative size in bytes of the fixed headers present in the
 * segment, using the ICE_FLOW_PROT_HDR_SZ_* constants above.
 */
865 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2: MAC header, plus a VLAN tag if one was requested */
870 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
871 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3: the headers are mutually exclusive (validated earlier) */
874 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
875 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
876 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
877 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
878 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
879 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
880 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
881 /* A L3 header is required if L4 is specified */
/* L4: again mutually exclusive per segment */
885 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
886 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
887 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
888 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
889 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
890 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
891 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
892 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
898 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
899 * @params: information about the flow to be processed
901 * This function identifies the packet types associated with the protocol
902 * headers being present in packet segments of the specified flow profile.
/* Starts from an all-ones candidate bitmap and progressively narrows it by
 * ANDing (or AND-NOTing) per-protocol PTYPE bitmaps for each segment.
 * Throughout, "!i" selects an outermost/first (ofos/of) table and "i"
 * selects an inner-layer (il) table.
 */
904 static enum ice_status
905 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
907 struct ice_flow_prof *prof;
/* Begin with every PTYPE as a candidate */
910 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
915 for (i = 0; i < params->prof->segs_cnt; i++) {
916 const ice_bitmap_t *src;
919 hdrs = prof->segs[i].hdrs;
/* L2: Ethernet, VLAN (inner only), ARP (outer only) */
921 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
922 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
923 (const ice_bitmap_t *)ice_ptypes_mac_il;
924 ice_and_bitmap(params->ptypes, params->ptypes, src,
928 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
929 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
930 ice_and_bitmap(params->ptypes, params->ptypes, src,
934 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
935 ice_and_bitmap(params->ptypes, params->ptypes,
936 (const ice_bitmap_t *)ice_ptypes_arp_of,
940 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
941 src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
942 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* L3: mutually-exclusive chain. Priority order: IPV_OTHER ("all"
 * tables), IPV_FRAG (fragment tables), no-L4 variants, plain tables.
 */
945 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
946 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
948 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
949 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
950 ice_and_bitmap(params->ptypes, params->ptypes, src,
952 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
953 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
955 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
956 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
957 ice_and_bitmap(params->ptypes, params->ptypes, src,
959 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
960 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
961 src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
962 ice_and_bitmap(params->ptypes, params->ptypes, src,
964 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
965 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
966 src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
967 ice_and_bitmap(params->ptypes, params->ptypes, src,
969 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
970 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
971 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
972 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
973 ice_and_bitmap(params->ptypes, params->ptypes, src,
975 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
976 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
977 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
978 ice_and_bitmap(params->ptypes, params->ptypes, src,
980 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
981 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
982 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
983 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
984 ice_and_bitmap(params->ptypes, params->ptypes, src,
986 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
987 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
988 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
989 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* Non-IP Ethernet vs PPPoE; otherwise (elided else) PPPoE PTYPEs
 * are explicitly excluded via AND-NOT.
 */
993 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
994 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
995 ice_and_bitmap(params->ptypes, params->ptypes,
996 src, ICE_FLOW_PTYPE_MAX);
997 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
998 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
999 ice_and_bitmap(params->ptypes, params->ptypes, src,
1000 ICE_FLOW_PTYPE_MAX);
1002 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1003 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1004 ICE_FLOW_PTYPE_MAX);
/* L4: one of UDP/TCP/SCTP */
1007 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1008 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1009 ice_and_bitmap(params->ptypes, params->ptypes, src,
1010 ICE_FLOW_PTYPE_MAX);
1011 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1012 ice_and_bitmap(params->ptypes, params->ptypes,
1013 (const ice_bitmap_t *)ice_ptypes_tcp_il,
1014 ICE_FLOW_PTYPE_MAX);
1015 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1016 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1017 ice_and_bitmap(params->ptypes, params->ptypes, src,
1018 ICE_FLOW_PTYPE_MAX);
/* Tunnel/other protocols: mutually-exclusive chain. The GTP-U
 * variants additionally record PTYPE attribute tables so later
 * stages can distinguish downlink/uplink/extension-header forms.
 */
1021 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1022 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1023 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1024 ice_and_bitmap(params->ptypes, params->ptypes, src,
1025 ICE_FLOW_PTYPE_MAX);
1026 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1027 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1028 ice_and_bitmap(params->ptypes, params->ptypes, src,
1029 ICE_FLOW_PTYPE_MAX);
1030 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1031 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1032 ice_and_bitmap(params->ptypes, params->ptypes,
1033 src, ICE_FLOW_PTYPE_MAX);
1034 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1035 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1036 ice_and_bitmap(params->ptypes, params->ptypes,
1037 src, ICE_FLOW_PTYPE_MAX);
1038 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1039 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1040 ice_and_bitmap(params->ptypes, params->ptypes,
1041 src, ICE_FLOW_PTYPE_MAX);
1042 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1043 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1044 ice_and_bitmap(params->ptypes, params->ptypes,
1045 src, ICE_FLOW_PTYPE_MAX);
1047 /* Attributes for GTP packet with downlink */
1048 params->attr = ice_attr_gtpu_down;
1049 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1050 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1051 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1052 ice_and_bitmap(params->ptypes, params->ptypes,
1053 src, ICE_FLOW_PTYPE_MAX);
1055 /* Attributes for GTP packet with uplink */
1056 params->attr = ice_attr_gtpu_up;
1057 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1058 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1059 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1060 ice_and_bitmap(params->ptypes, params->ptypes,
1061 src, ICE_FLOW_PTYPE_MAX);
1063 /* Attributes for GTP packet with Extension Header */
1064 params->attr = ice_attr_gtpu_eh;
1065 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1066 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1067 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1068 ice_and_bitmap(params->ptypes, params->ptypes,
1069 src, ICE_FLOW_PTYPE_MAX);
1071 /* Attributes for GTP packet without Extension Header */
1072 params->attr = ice_attr_gtpu_session;
1073 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1074 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1075 src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1076 ice_and_bitmap(params->ptypes, params->ptypes,
1077 src, ICE_FLOW_PTYPE_MAX);
1078 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1079 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1080 ice_and_bitmap(params->ptypes, params->ptypes,
1081 src, ICE_FLOW_PTYPE_MAX);
1082 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1083 src = (const ice_bitmap_t *)ice_ptypes_esp;
1084 ice_and_bitmap(params->ptypes, params->ptypes,
1085 src, ICE_FLOW_PTYPE_MAX);
1086 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1087 src = (const ice_bitmap_t *)ice_ptypes_ah;
1088 ice_and_bitmap(params->ptypes, params->ptypes,
1089 src, ICE_FLOW_PTYPE_MAX);
1090 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1091 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1092 ice_and_bitmap(params->ptypes, params->ptypes,
1093 src, ICE_FLOW_PTYPE_MAX);
1094 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1095 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1096 ice_and_bitmap(params->ptypes, params->ptypes,
1097 src, ICE_FLOW_PTYPE_MAX);
1098 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1099 src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1100 ice_and_bitmap(params->ptypes, params->ptypes,
1101 src, ICE_FLOW_PTYPE_MAX);
1104 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1105 src = (const ice_bitmap_t *)ice_ptypes_ppp;
1106 ice_and_bitmap(params->ptypes, params->ptypes,
1107 src, ICE_FLOW_PTYPE_MAX);
/* PFCP: select node vs session PTYPEs when requested; otherwise
 * (elided else) both PFCP sets are excluded via AND-NOT so PFCP
 * PTYPEs never leak into non-PFCP profiles.
 */
1110 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1111 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1113 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1116 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1118 ice_and_bitmap(params->ptypes, params->ptypes,
1119 src, ICE_FLOW_PTYPE_MAX);
1121 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1122 ice_andnot_bitmap(params->ptypes, params->ptypes,
1123 src, ICE_FLOW_PTYPE_MAX);
1125 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1126 ice_andnot_bitmap(params->ptypes, params->ptypes,
1127 src, ICE_FLOW_PTYPE_MAX);
1135 * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
1136 * @hw: pointer to the HW struct
1137 * @params: information about the flow to be processed
1138 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1140 * This function will allocate extraction sequence entries for a DWORD size
1141 * chunk of the packet flags.
1143 static enum ice_status
1144 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1145 struct ice_flow_prof_params *params,
1146 enum ice_flex_mdid_pkt_flags flags)
/* fvw = number of field vector words available in this block */
1148 u8 fv_words = hw->blk[params->blk].es.fvw;
1151 /* Make sure the number of extraction sequence entries required does not
1152 * exceed the block's capacity.
1154 if (params->es_cnt >= fv_words)
1155 return ICE_ERR_MAX_LIMIT;
1157 /* some blocks require a reversed field vector layout */
1158 if (hw->blk[params->blk].es.reverse)
1159 idx = fv_words - params->es_cnt - 1;
1161 idx = params->es_cnt;
/* Metadata protocol ID with the flags value as the extraction offset */
1163 params->es[idx].prot_id = ICE_PROT_META_ID;
1164 params->es[idx].off = flags;
1171 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1172 * @hw: pointer to the HW struct
1173 * @params: information about the flow to be processed
1174 * @seg: packet segment index of the field to be extracted
1175 * @fld: ID of field to be extracted
1176 * @match: bitfield of all fields
1178 * This function determines the protocol ID, offset, and size of the given
1179 * field. It then allocates one or more extraction sequence entries for the
1180 * given field, and fill the entries with protocol ID and offset information.
1182 static enum ice_status
1183 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1184 u8 seg, enum ice_flow_field fld, u64 match)
/* sib: a "sibling" field that shares an extraction word with fld
 * (e.g. IPv4 TTL and protocol); ICE_FLOW_FIELD_IDX_MAX means none.
 */
1186 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1187 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1188 u8 fv_words = hw->blk[params->blk].es.fvw;
1189 struct ice_flow_fld_info *flds;
1190 u16 cnt, ese_bits, i;
1195 flds = params->prof->segs[seg].fields;
/* Map the flow field to a hardware protocol ID. "seg == 0" selects
 * the outer-most/first-header protocol ID variant where one exists.
 */
1198 case ICE_FLOW_FIELD_IDX_ETH_DA:
1199 case ICE_FLOW_FIELD_IDX_ETH_SA:
1200 case ICE_FLOW_FIELD_IDX_S_VLAN:
1201 case ICE_FLOW_FIELD_IDX_C_VLAN:
1202 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1204 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1205 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1207 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1208 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1210 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1211 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1213 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1214 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1215 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1217 /* TTL and PROT share the same extraction seq. entry.
1218 * Each is considered a sibling to the other in terms of sharing
1219 * the same extraction sequence entry.
1221 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1222 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1224 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1226 /* If the sibling field is also included, that field's
1227 * mask needs to be included.
1229 if (match & BIT(sib))
1230 sib_mask = ice_flds_info[sib].mask;
1232 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1233 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1234 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1236 /* TTL and PROT share the same extraction seq. entry.
1237 * Each is considered a sibling to the other in terms of sharing
1238 * the same extraction sequence entry.
1240 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1241 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1243 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1245 /* If the sibling field is also included, that field's
1246 * mask needs to be included.
1248 if (match & BIT(sib))
1249 sib_mask = ice_flds_info[sib].mask;
1251 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1252 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1253 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1255 case ICE_FLOW_FIELD_IDX_IPV4_ID:
1256 prot_id = ICE_PROT_IPV4_OF_OR_S;
1258 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1259 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1260 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1261 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1262 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1263 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1264 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1265 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1266 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1268 case ICE_FLOW_FIELD_IDX_IPV6_ID:
1269 prot_id = ICE_PROT_IPV6_FRAG;
1271 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1272 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1273 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1274 prot_id = ICE_PROT_TCP_IL;
1276 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1277 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1278 prot_id = ICE_PROT_UDP_IL_OR_S;
1280 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1281 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1282 prot_id = ICE_PROT_SCTP_IL;
1284 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1285 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1286 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1287 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1288 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1289 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1290 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1291 /* GTP is accessed through UDP OF protocol */
1292 prot_id = ICE_PROT_UDP_OF;
1294 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1295 prot_id = ICE_PROT_PPPOE;
1297 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1298 prot_id = ICE_PROT_UDP_IL_OR_S;
1300 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1301 prot_id = ICE_PROT_L2TPV3;
1303 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1304 prot_id = ICE_PROT_ESP_F;
1306 case ICE_FLOW_FIELD_IDX_AH_SPI:
1307 prot_id = ICE_PROT_ESP_2;
1309 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1310 prot_id = ICE_PROT_UDP_IL_OR_S;
1312 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1313 prot_id = ICE_PROT_ECPRI;
1315 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1316 prot_id = ICE_PROT_UDP_IL_OR_S;
1318 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1319 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1320 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1321 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1322 case ICE_FLOW_FIELD_IDX_ARP_OP:
1323 prot_id = ICE_PROT_ARP_OF;
1325 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1326 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1327 /* ICMP type and code share the same extraction seq. entry */
1328 prot_id = (params->prof->segs[seg].hdrs &
1329 ICE_FLOW_SEG_HDR_IPV4) ?
1330 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1331 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1332 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1333 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1335 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1336 prot_id = ICE_PROT_GRE_OF;
1339 return ICE_ERR_NOT_IMPL;
1342 /* Each extraction sequence entry is a word in size, and extracts a
1343 * word-aligned offset from a protocol header.
1345 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record extraction info: word-aligned byte offset plus the bit
 * displacement of the field within that word.
 */
1347 flds[fld].xtrct.prot_id = prot_id;
1348 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1349 ICE_FLOW_FV_EXTRACT_SZ;
1350 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1351 flds[fld].xtrct.idx = params->es_cnt;
1352 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1354 /* Adjust the next field-entry index after accommodating the number of
1355 * entries this field consumes
1357 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1358 ice_flds_info[fld].size, ese_bits);
1360 /* Fill in the extraction sequence entries needed for this field */
1361 off = flds[fld].xtrct.off;
1362 mask = flds[fld].xtrct.mask;
1363 for (i = 0; i < cnt; i++) {
1364 /* Only consume an extraction sequence entry if there is no
1365 * sibling field associated with this field or the sibling entry
1366 * already extracts the word shared with this field.
1368 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1369 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1370 flds[sib].xtrct.off != off) {
1373 /* Make sure the number of extraction sequence required
1374 * does not exceed the block's capability
1376 if (params->es_cnt >= fv_words)
1377 return ICE_ERR_MAX_LIMIT;
1379 /* some blocks require a reversed field vector layout */
1380 if (hw->blk[params->blk].es.reverse)
1381 idx = fv_words - params->es_cnt - 1;
1383 idx = params->es_cnt;
1385 params->es[idx].prot_id = prot_id;
1386 params->es[idx].off = off;
/* Combine this field's mask with the sibling's so both
 * fields sharing the word are matched.
 */
1387 params->mask[idx] = mask | sib_mask;
1391 off += ICE_FLOW_FV_EXTRACT_SZ;
1398 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1399 * @hw: pointer to the HW struct
1400 * @params: information about the flow to be processed
1401 * @seg: index of packet segment whose raw fields are to be extracted
1403 static enum ice_status
1404 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* Nothing to do when the segment defines no raw fields */
1411 if (!params->prof->segs[seg].raws_cnt)
1414 if (params->prof->segs[seg].raws_cnt >
1415 ARRAY_SIZE(params->prof->segs[seg].raws))
1416 return ICE_ERR_MAX_LIMIT;
1418 /* Offsets within the segment headers are not supported */
1419 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1421 return ICE_ERR_PARAM;
1423 fv_words = hw->blk[params->blk].es.fvw;
1425 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1426 struct ice_flow_seg_fld_raw *raw;
1429 raw = &params->prof->segs[seg].raws[i];
1431 /* Storing extraction information */
/* Raw bytes are always extracted relative to the outer MAC header */
1432 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1433 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1434 ICE_FLOW_FV_EXTRACT_SZ;
1435 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1437 raw->info.xtrct.idx = params->es_cnt;
1439 /* Determine the number of field vector entries this raw field
/* consumes: round up (displacement + length in bits) to words */
1442 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1443 (raw->info.src.last * BITS_PER_BYTE),
1444 (ICE_FLOW_FV_EXTRACT_SZ *
1446 off = raw->info.xtrct.off;
1447 for (j = 0; j < cnt; j++) {
1450 /* Make sure the number of extraction sequence required
1451 * does not exceed the block's capability
1453 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1454 params->es_cnt >= ICE_MAX_FV_WORDS)
1455 return ICE_ERR_MAX_LIMIT;
1457 /* some blocks require a reversed field vector layout */
1458 if (hw->blk[params->blk].es.reverse)
1459 idx = fv_words - params->es_cnt - 1;
1461 idx = params->es_cnt;
1463 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1464 params->es[idx].off = off;
1466 off += ICE_FLOW_FV_EXTRACT_SZ;
1474 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1475 * @hw: pointer to the HW struct
1476 * @params: information about the flow to be processed
1478 * This function iterates through all matched fields in the given segments, and
1479 * creates an extraction sequence for the fields.
1481 static enum ice_status
1482 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1483 struct ice_flow_prof_params *params)
1485 enum ice_status status = ICE_SUCCESS;
1488 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
/* the packet-flags metadata word, before any protocol fields */
1491 if (params->blk == ICE_BLK_ACL) {
1492 status = ice_flow_xtract_pkt_flags(hw, params,
1493 ICE_RX_MDID_PKT_FLAGS_15_0);
1498 for (i = 0; i < params->prof->segs_cnt; i++) {
1499 u64 match = params->prof->segs[i].match;
1500 enum ice_flow_field j;
/* Walk every matched field bit in this segment and emit its
 * extraction entries; clear each bit once handled.
 */
1502 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1503 ICE_FLOW_FIELD_IDX_MAX) {
1504 status = ice_flow_xtract_fld(hw, params, i, j, match);
1507 ice_clear_bit(j, (ice_bitmap_t *)&match);
1510 /* Process raw matching bytes */
1511 status = ice_flow_xtract_raws(hw, params, i);
1520 * ice_flow_sel_acl_scen - returns the specific scenario
1521 * @hw: pointer to the hardware structure
1522 * @params: information about the flow to be processed
1524 * This function will return the specific scenario based on the
1525 * params passed to it
1527 static enum ice_status
1528 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1530 /* Find the best-fit scenario for the provided match width */
1531 struct ice_acl_scen *cand_scen = NULL, *scen;
/* No ACL table allocated - nothing to select from */
1534 return ICE_ERR_DOES_NOT_EXIST;
1536 /* Loop through each scenario and match against the scenario width
1537 * to select the specific scenario
/* Best fit = the narrowest scenario that is still wide enough to hold
 * the formatted entry (entry_length bytes).
 */
1539 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1540 if (scen->eff_width >= params->entry_length &&
1541 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1544 return ICE_ERR_DOES_NOT_EXIST;
1546 params->prof->cfg.scen = cand_scen;
1552 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1553 * @params: information about the flow to be processed
/* Assigns a byte position (entry.val) and byte length (entry.last) to every
 * matched field and raw field across all segments, and allocates range-checker
 * slots for range-type fields.
 */
1555 static enum ice_status
1556 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1558 u16 index, i, range_idx = 0;
1560 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1562 for (i = 0; i < params->prof->segs_cnt; i++) {
1563 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1566 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1567 ICE_FLOW_FIELD_IDX_MAX) {
1568 struct ice_flow_fld_info *fld = &seg->fields[j];
1570 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1572 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1573 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1575 /* Range checking only supported for single
/* 16-bit word fields: more than one word would need
 * multiple range checkers.
 */
1578 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1580 BITS_PER_BYTE * 2) > 1)
1581 return ICE_ERR_PARAM;
1583 /* Ranges must define low and high values */
1584 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1585 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1586 return ICE_ERR_PARAM;
/* Range fields consume a range-checker slot, not entry bytes */
1588 fld->entry.val = range_idx++;
1590 /* Store adjusted byte-length of field for later
1591 * use, taking into account potential
1592 * non-byte-aligned displacement
1594 fld->entry.last = DIVIDE_AND_ROUND_UP
1595 (ice_flds_info[j].size +
1596 (fld->xtrct.disp % BITS_PER_BYTE),
/* Non-range fields consume entry bytes at the running index */
1598 fld->entry.val = index;
1599 index += fld->entry.last;
/* Raw fields are laid out after the matched fields of the segment */
1603 for (j = 0; j < seg->raws_cnt; j++) {
1604 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1606 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1607 raw->info.entry.val = index;
1608 raw->info.entry.last = raw->info.src.last;
1609 index += raw->info.entry.last;
1613 /* Currently only support using the byte selection base, which only
1614 * allows for an effective entry size of 30 bytes. Reject anything
/* larger than the byte-selection element count */
1617 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1618 return ICE_ERR_PARAM;
1620 /* Only 8 range checkers per profile, reject anything trying to use
/* more than the hardware provides */
1623 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1624 return ICE_ERR_PARAM;
1626 /* Store # bytes required for entry for later use */
1627 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1633 * ice_flow_proc_segs - process all packet segments associated with a profile
1634 * @hw: pointer to the HW struct
1635 * @params: information about the flow to be processed
/* Pipeline: resolve PTYPEs, then build the extraction sequence, then do
 * any per-block post-processing (ACL entry layout + scenario selection).
 */
1637 static enum ice_status
1638 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1640 enum ice_status status;
1642 status = ice_flow_proc_seg_hdrs(params);
1646 status = ice_flow_create_xtrct_seq(hw, params);
1650 switch (params->blk) {
1653 status = ICE_SUCCESS;
/* ACL block: define the entry byte layout, then pick a scenario */
1656 status = ice_flow_acl_def_entry_frmt(params);
1659 status = ice_flow_sel_acl_scen(hw, params);
1664 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds():
 * CHK_FLDS    - also compare the matched-field bitmaps of each segment
 * CHK_VSI     - require the profile to be associated with the given VSI
 * NOT_CHK_DIR - ignore the flow direction when comparing profiles
 */
1670 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1671 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1672 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1675 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1676 * @hw: pointer to the HW struct
1677 * @blk: classification stage
1678 * @dir: flow direction
1679 * @segs: array of one or more packet segments that describe the flow
1680 * @segs_cnt: number of packet segments provided
1681 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1682 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
/* Returns the first matching profile or NULL; the profile list lock is held
 * only for the duration of the search.
 */
1684 static struct ice_flow_prof *
1685 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1686 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1687 u8 segs_cnt, u16 vsi_handle, u32 conds)
1689 struct ice_flow_prof *p, *prof = NULL;
1691 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Candidate must match direction (unless NOT_CHK_DIR) and segment count */
1692 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1693 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1694 segs_cnt && segs_cnt == p->segs_cnt) {
1697 /* Check for profile-VSI association if specified */
1698 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1699 ice_is_vsi_valid(hw, vsi_handle) &&
1700 !ice_is_bit_set(p->vsis, vsi_handle))
1703 /* Protocol headers must be checked. Matched fields are
1704 * checked if specified.
1706 for (i = 0; i < segs_cnt; i++)
1707 if (segs[i].hdrs != p->segs[i].hdrs ||
1708 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1709 segs[i].match != p->segs[i].match))
1712 /* A match is found if all segments are matched */
1713 if (i == segs_cnt) {
1718 ice_release_lock(&hw->fl_profs_locks[blk]);
1724 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1725 * @hw: pointer to the HW struct
1726 * @blk: classification stage
1727 * @dir: flow direction
1728 * @segs: array of one or more packet segments that describe the flow
1729 * @segs_cnt: number of packet segments provided
/* Convenience wrapper over ice_flow_find_prof_conds that always checks
 * matched fields; returns the profile ID or ICE_FLOW_PROF_ID_INVAL.
 */
1732 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1733 struct ice_flow_seg_info *segs, u8 segs_cnt)
1735 struct ice_flow_prof *p;
1737 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1738 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1740 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1744 * ice_flow_find_prof_id - Look up a profile with given profile ID
1745 * @hw: pointer to the HW struct
1746 * @blk: classification stage
1747 * @prof_id: unique ID to identify this flow profile
/* Linear search of the block's profile list; caller is responsible for any
 * locking of hw->fl_profs[blk].
 */
1749 static struct ice_flow_prof *
1750 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1752 struct ice_flow_prof *p;
1754 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1755 if (p->id == prof_id)
1762 * ice_dealloc_flow_entry - Deallocate flow entry memory
1763 * @hw: pointer to the HW struct
1764 * @entry: flow entry to be removed
1767 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
 /* Free, in order: the formatted entry buffer, the optional range-checker
  * buffer, any copied actions, and finally the entry object itself.
  */
1773 ice_free(hw, entry->entry);
1775 if (entry->range_buf) {
1776 ice_free(hw, entry->range_buf);
1777 entry->range_buf = NULL;
1781 ice_free(hw, entry->acts);
1783 entry->acts_cnt = 0;
1786 ice_free(hw, entry);
1790 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1791 * @hw: pointer to the HW struct
1792 * @blk: classification stage
1793 * @prof_id: the profile ID handle
1794 * @hw_prof_id: pointer to variable to receive the HW profile ID
 *
 * Returns ICE_SUCCESS and fills *hw_prof_id when a mapping exists,
 * ICE_ERR_DOES_NOT_EXIST otherwise.
1797 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1800 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1801 struct ice_prof_map *map;
 /* prof_map_lock protects the SW profile-ID -> HW profile-ID map */
1803 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1804 map = ice_search_prof_id(hw, blk, prof_id);
1806 *hw_prof_id = map->prof_id;
1807 status = ICE_SUCCESS;
1809 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1813 #define ICE_ACL_INVALID_SCEN 0x3f
1816 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1817 * @hw: pointer to the hardware structure
1818 * @prof: pointer to flow profile
1819 * @buf: destination buffer function writes partial extraction sequence to
1821 * returns ICE_SUCCESS if no PF is associated to the given profile
1822 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1823 * returns other error code for real error
1825 static enum ice_status
1826 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1827 struct ice_aqc_acl_prof_generic_frmt *buf)
1829 enum ice_status status;
1832 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1836 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1840 /* If all PF's associated scenarios are all 0 or all
1841 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1842 * not been configured yet.
1844 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1845 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1846 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1847 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1850 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1851 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1852 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1853 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1854 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1855 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1856 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1857 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
 /* Any other mix of scenario numbers means some PF still references
  * this profile.
  */
1860 return ICE_ERR_IN_USE;
1864 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1865 * @hw: pointer to the hardware structure
1866 * @acts: array of actions to be performed on a match
1867 * @acts_cnt: number of actions
 *
 * Scans the action list for counter actions and deallocates the HW ACL
 * counters (single or dual, depending on action type) they reference.
1869 static enum ice_status
1870 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1875 for (i = 0; i < acts_cnt; i++) {
1876 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1877 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1878 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1879 struct ice_acl_cntrs cntrs = { 0 };
1880 enum ice_status status;
1882 /* amount is unused in the dealloc path but the common
1883 * parameter check routine wants a value set, as zero
1884 * is invalid for the check. Just set it.
1887 cntrs.bank = 0; /* Only bank0 for the moment */
1889 LE16_TO_CPU(acts[i].data.acl_act.value);
1891 LE16_TO_CPU(acts[i].data.acl_act.value);
1893 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1894 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1896 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1898 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1907 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1908 * @hw: pointer to the hardware structure
1909 * @prof: pointer to flow profile
1911 * Disassociate the scenario from the profile for the PF of the VSI.
1913 static enum ice_status
1914 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1916 struct ice_aqc_acl_prof_generic_frmt buf;
1917 enum ice_status status = ICE_SUCCESS;
1920 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1922 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
 /* Read-modify-write the profile's generic format: fetch the current
  * per-PF scenario table, invalidate this PF's slot, write it back.
  */
1926 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1930 /* Clear scenario for this PF */
1931 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1932 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1938 * ice_flow_rem_entry_sync - Remove a flow entry
1939 * @hw: pointer to the HW struct
1940 * @blk: classification stage
1941 * @entry: flow entry to be removed
1943 static enum ice_status
1944 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1945 struct ice_flow_entry *entry)
1948 return ICE_ERR_BAD_PTR;
 /* ACL entries also live in a HW scenario and may own counters;
  * tear those down before unlinking the SW entry.
  */
1950 if (blk == ICE_BLK_ACL) {
1951 enum ice_status status;
1954 return ICE_ERR_BAD_PTR;
1956 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1957 entry->scen_entry_idx);
1961 /* Checks if we need to release an ACL counter. */
1962 if (entry->acts_cnt && entry->acts)
1963 ice_flow_acl_free_act_cntr(hw, entry->acts,
1967 LIST_DEL(&entry->l_entry);
1969 ice_dealloc_flow_entry(hw, entry);
1975 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1976 * @hw: pointer to the HW struct
1977 * @blk: classification stage
1978 * @dir: flow direction
1979 * @prof_id: unique ID to identify this flow profile
1980 * @segs: array of one or more packet segments that describe the flow
1981 * @segs_cnt: number of packet segments provided
1982 * @acts: array of default actions
1983 * @acts_cnt: number of default actions
1984 * @prof: stores the returned flow profile added
1986 * Assumption: the caller has acquired the lock to the profile list
1988 static enum ice_status
1989 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1990 enum ice_flow_dir dir, u64 prof_id,
1991 struct ice_flow_seg_info *segs, u8 segs_cnt,
1992 struct ice_flow_action *acts, u8 acts_cnt,
1993 struct ice_flow_prof **prof)
1995 struct ice_flow_prof_params *params;
1996 enum ice_status status;
1999 if (!prof || (acts_cnt && !acts))
2000 return ICE_ERR_BAD_PTR;
 /* params is heap-allocated rather than stack-allocated — presumably
  * because the struct is large; TODO confirm against its definition.
  */
2002 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2004 return ICE_ERR_NO_MEMORY;
2006 params->prof = (struct ice_flow_prof *)
2007 ice_malloc(hw, sizeof(*params->prof));
2008 if (!params->prof) {
2009 status = ICE_ERR_NO_MEMORY;
2013 /* initialize extraction sequence to all invalid (0xff) */
2014 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2015 params->es[i].prot_id = ICE_PROT_INVALID;
2016 params->es[i].off = ICE_FV_OFFSET_INVAL;
2020 params->prof->id = prof_id;
2021 params->prof->dir = dir;
2022 params->prof->segs_cnt = segs_cnt;
2024 /* Make a copy of the segments that need to be persistent in the flow
2027 for (i = 0; i < segs_cnt; i++)
2028 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2029 ICE_NONDMA_TO_NONDMA);
2031 /* Make a copy of the actions that need to be persistent in the flow
2035 params->prof->acts = (struct ice_flow_action *)
2036 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2037 ICE_NONDMA_TO_NONDMA);
2039 if (!params->prof->acts) {
2040 status = ICE_ERR_NO_MEMORY;
2045 status = ice_flow_proc_segs(hw, params);
2047 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2051 /* Add a HW profile for this flow profile */
2052 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2053 params->attr, params->attr_cnt, params->es,
2056 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2060 INIT_LIST_HEAD(&params->prof->entries);
2061 ice_init_lock(&params->prof->entries_lock);
2062 *prof = params->prof;
 /* Error path: free the partially-built profile and its action copy */
2066 if (params->prof->acts)
2067 ice_free(hw, params->prof->acts);
2068 ice_free(hw, params->prof);
2071 ice_free(hw, params);
2077 * ice_flow_rem_prof_sync - remove a flow profile
2078 * @hw: pointer to the hardware structure
2079 * @blk: classification stage
2080 * @prof: pointer to flow profile to remove
2082 * Assumption: the caller has acquired the lock to the profile list
2084 static enum ice_status
2085 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2086 struct ice_flow_prof *prof)
2088 enum ice_status status;
2090 /* Remove all remaining flow entries before removing the flow profile */
2091 if (!LIST_EMPTY(&prof->entries)) {
2092 struct ice_flow_entry *e, *t;
2094 ice_acquire_lock(&prof->entries_lock);
2096 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2098 status = ice_flow_rem_entry_sync(hw, blk, e);
2103 ice_release_lock(&prof->entries_lock);
2106 if (blk == ICE_BLK_ACL) {
2107 struct ice_aqc_acl_profile_ranges query_rng_buf;
2108 struct ice_aqc_acl_prof_generic_frmt buf;
2111 /* Disassociate the scenario from the profile for the PF */
2112 status = ice_flow_acl_disassoc_scen(hw, prof);
2116 /* Clear the range-checker if the profile ID is no longer
2119 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2120 if (status && status != ICE_ERR_IN_USE) {
2122 } else if (!status) {
2123 /* Clear the range-checker value for profile ID */
2124 ice_memset(&query_rng_buf, 0,
2125 sizeof(struct ice_aqc_acl_profile_ranges),
2128 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2133 status = ice_prog_acl_prof_ranges(hw, prof_id,
2134 &query_rng_buf, NULL);
2140 /* Remove all hardware profiles associated with this flow profile */
2141 status = ice_rem_prof(hw, blk, prof->id);
 /* Unlink from the SW profile list and release SW resources */
2143 LIST_DEL(&prof->l_entry);
2144 ice_destroy_lock(&prof->entries_lock);
2146 ice_free(hw, prof->acts);
2154 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2155 * @buf: Destination buffer function writes partial xtrct sequence to
2156 * @info: Info about field
2159 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2160 struct ice_flow_fld_info *info)
 /* Starting byte of the field within the extracted field vector:
  * word index scaled by the extract size, plus the byte part of the
  * bit displacement.
  */
2165 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2166 info->xtrct.disp / BITS_PER_BYTE;
2167 dst = info->entry.val;
2168 for (i = 0; i < info->entry.last; i++)
2169 /* HW stores field vector words in LE, convert words back to BE
2170 * so constructed entries will end up in network order
2172 buf->byte_selection[dst++] = src++ ^ 1;
2176 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2177 * @hw: pointer to the hardware structure
2178 * @prof: pointer to flow profile
2180 static enum ice_status
2181 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2183 struct ice_aqc_acl_prof_generic_frmt buf;
2184 struct ice_flow_fld_info *info;
2185 enum ice_status status;
2189 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2191 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
 /* ICE_ERR_IN_USE means another PF already programmed the profile-wide
  * configuration; any other error is fatal.
  */
2195 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2196 if (status && status != ICE_ERR_IN_USE)
2200 /* Program the profile dependent configuration. This is done
2201 * only once regardless of the number of PFs using that profile
2203 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2205 for (i = 0; i < prof->segs_cnt; i++) {
2206 struct ice_flow_seg_info *seg = &prof->segs[i];
2209 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2210 ICE_FLOW_FIELD_IDX_MAX) {
2211 info = &seg->fields[j];
 /* Range-matched fields go through the word selector;
  * everything else uses the byte-selection sequence.
  */
2213 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2214 buf.word_selection[info->entry.val] =
2217 ice_flow_acl_set_xtrct_seq_fld(&buf,
2221 for (j = 0; j < seg->raws_cnt; j++) {
2222 info = &seg->raws[j].info;
2223 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
 /* Default every PF's scenario number to "invalid" before filling in
  * this PF's slot below.
  */
2227 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2228 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2232 /* Update the current PF */
2233 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2234 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2240 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2241 * @hw: pointer to the hardware structure
2242 * @blk: classification stage
2243 * @vsi_handle: software VSI handle
2244 * @vsig: target VSI group
2246 * Assumption: the caller has already verified that the VSI to
2247 * be added has the same characteristics as the VSIG and will
2248 * thereby have access to all resources added to that VSIG.
2251 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2254 enum ice_status status;
2256 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2257 return ICE_ERR_PARAM;
 /* The HW VSI->VSIG update is serialized with the per-block profile
  * list lock.
  */
2259 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2260 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2262 ice_release_lock(&hw->fl_profs_locks[blk]);
2268 * ice_flow_assoc_prof - associate a VSI with a flow profile
2269 * @hw: pointer to the hardware structure
2270 * @blk: classification stage
2271 * @prof: pointer to flow profile
2272 * @vsi_handle: software VSI handle
2274 * Assumption: the caller has acquired the lock to the profile list
2275 * and the software VSI handle has been validated
2278 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2279 struct ice_flow_prof *prof, u16 vsi_handle)
2281 enum ice_status status = ICE_SUCCESS;
 /* No-op (ICE_SUCCESS) if the VSI is already associated */
2283 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
 /* ACL profiles must have their extraction sequence programmed
  * before the first VSI is attached.
  */
2284 if (blk == ICE_BLK_ACL) {
2285 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2289 status = ice_add_prof_id_flow(hw, blk,
2290 ice_get_hw_vsi_num(hw,
2294 ice_set_bit(vsi_handle, prof->vsis);
2296 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2304 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2305 * @hw: pointer to the hardware structure
2306 * @blk: classification stage
2307 * @prof: pointer to flow profile
2308 * @vsi_handle: software VSI handle
2310 * Assumption: the caller has acquired the lock to the profile list
2311 * and the software VSI handle has been validated
2313 static enum ice_status
2314 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2315 struct ice_flow_prof *prof, u16 vsi_handle)
2317 enum ice_status status = ICE_SUCCESS;
 /* No-op (ICE_SUCCESS) if the VSI was never associated */
2319 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2320 status = ice_rem_prof_id_flow(hw, blk,
2321 ice_get_hw_vsi_num(hw,
 /* Clear the SW association bit only after the HW removal
  * succeeded.
  */
2325 ice_clear_bit(vsi_handle, prof->vsis);
2327 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2335 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2336 * @hw: pointer to the HW struct
2337 * @blk: classification stage
2338 * @dir: flow direction
2339 * @prof_id: unique ID to identify this flow profile
2340 * @segs: array of one or more packet segments that describe the flow
2341 * @segs_cnt: number of packet segments provided
2342 * @acts: array of default actions
2343 * @acts_cnt: number of default actions
2344 * @prof: stores the returned flow profile added
 *
 * On success, *prof is also linked into hw->fl_profs[blk].
2347 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2348 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2349 struct ice_flow_action *acts, u8 acts_cnt,
2350 struct ice_flow_prof **prof)
2352 enum ice_status status;
2354 if (segs_cnt > ICE_FLOW_SEG_MAX)
2355 return ICE_ERR_MAX_LIMIT;
2358 return ICE_ERR_PARAM;
2361 return ICE_ERR_BAD_PTR;
 /* Validate the segment header chain before taking the list lock */
2363 status = ice_flow_val_hdrs(segs, segs_cnt);
2367 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2369 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2370 acts, acts_cnt, prof);
2372 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2374 ice_release_lock(&hw->fl_profs_locks[blk]);
2380 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2381 * @hw: pointer to the HW struct
2382 * @blk: the block for which the flow profile is to be removed
2383 * @prof_id: unique ID of the flow profile to be removed
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if no profile with @prof_id is found.
2386 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2388 struct ice_flow_prof *prof;
2389 enum ice_status status;
 /* Look up and remove under the profile-list lock so the profile cannot
  * disappear between the find and the removal.
  */
2391 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2393 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2395 status = ICE_ERR_DOES_NOT_EXIST;
2399 /* prof becomes invalid after the call */
2400 status = ice_flow_rem_prof_sync(hw, blk, prof);
2403 ice_release_lock(&hw->fl_profs_locks[blk]);
2409 * ice_flow_find_entry - look for a flow entry using its unique ID
2410 * @hw: pointer to the HW struct
2411 * @blk: classification stage
2412 * @entry_id: unique ID to identify this flow entry
2414 * This function looks for the flow entry with the specified unique ID in all
2415 * flow profiles of the specified classification stage. If the entry is found,
2416 * it returns the handle to the flow entry. Otherwise, it returns
 * ICE_FLOW_ENTRY_HANDLE_INVAL.
2419 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2421 struct ice_flow_entry *found = NULL;
2422 struct ice_flow_prof *p;
2424 ice_acquire_lock(&hw->fl_profs_locks[blk]);
 /* Scan every profile's entry list; each entry list has its own lock */
2426 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2427 struct ice_flow_entry *e;
2429 ice_acquire_lock(&p->entries_lock);
2430 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2431 if (e->id == entry_id) {
2435 ice_release_lock(&p->entries_lock);
2441 ice_release_lock(&hw->fl_profs_locks[blk]);
2443 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2447 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2448 * @hw: pointer to the hardware structure
2449 * @acts: array of actions to be performed on a match
2450 * @acts_cnt: number of actions
2451 * @cnt_alloc: indicates if an ACL counter has been allocated.
 *
 * Validates the action list (allowed types, no duplicates) and allocates any
 * HW ACL counters required by counter actions, storing the counter index
 * back into the action's data.
2453 static enum ice_status
2454 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2455 u8 acts_cnt, bool *cnt_alloc)
2457 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2460 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2463 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2464 return ICE_ERR_OUT_OF_RANGE;
2466 for (i = 0; i < acts_cnt; i++) {
2467 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2468 acts[i].type != ICE_FLOW_ACT_DROP &&
2469 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2470 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2473 /* If the caller wants to add two actions of the same type, then
2474 * it is considered invalid configuration.
2476 if (ice_test_and_set_bit(acts[i].type, dup_check))
2477 return ICE_ERR_PARAM;
2480 /* Checks if ACL counters are needed. */
2481 for (i = 0; i < acts_cnt; i++) {
2482 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2483 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2484 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2485 struct ice_acl_cntrs cntrs = { 0 };
2486 enum ice_status status;
2489 cntrs.bank = 0; /* Only bank0 for the moment */
2491 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2492 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2494 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2496 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2499 /* Counter index within the bank */
2500 acts[i].data.acl_act.value =
2501 CPU_TO_LE16(cntrs.first_cntr);
2510 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2511 * @fld: number of the given field
2512 * @info: info about field
2513 * @range_buf: range checker configuration buffer
2514 * @data: pointer to a data buffer containing flow entry's match values/masks
2515 * @range: Input/output param indicating which range checkers are being used
2518 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2519 struct ice_aqc_acl_profile_ranges *range_buf,
2520 u8 *data, u8 *range)
2524 /* If not specified, default mask is all bits in field */
2525 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2526 BIT(ice_flds_info[fld].size) - 1 :
2527 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2529 /* If the mask is 0, then we don't need to worry about this input
2530 * range checker value.
 /* src.last holds the upper bound and src.val the lower bound of the
  * range; both are shifted by the field's bit displacement.
  */
2534 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2536 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2537 u8 range_idx = info->entry.val;
 /* Boundaries and mask are stored big-endian in the checker config */
2539 range_buf->checker_cfg[range_idx].low_boundary =
2540 CPU_TO_BE16(new_low);
2541 range_buf->checker_cfg[range_idx].high_boundary =
2542 CPU_TO_BE16(new_high);
2543 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2545 /* Indicate which range checker is being used */
2546 *range |= BIT(range_idx);
2551 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2552 * @fld: number of the given field
2553 * @info: info about the field
2554 * @buf: buffer containing the entry
2555 * @dontcare: buffer containing don't care mask for entry
2556 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's match value (and, when present, its mask inverted into
 * don't-care bits) from the user data buffer into the entry buffers,
 * byte-by-byte, applying the field's sub-byte bit displacement.
2559 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2560 u8 *dontcare, u8 *data)
2562 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2563 bool use_mask = false;
2566 src = info->src.val;
2567 mask = info->src.mask;
2568 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2569 disp = info->xtrct.disp % BITS_PER_BYTE;
2571 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2574 for (k = 0; k < info->entry.last; k++, dst++) {
2575 /* Add overflow bits from previous byte */
2576 buf[dst] = (tmp_s & 0xff00) >> 8;
2578 /* If mask is not valid, tmp_m is always zero, so just setting
2579 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2580 * overflow bits of mask from prev byte
2582 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2584 /* If there is displacement, last byte will only contain
2585 * displaced data, but there is no more data to read from user
2586 * buffer, so skip so as not to potentially read beyond end of
2589 if (!disp || k < info->entry.last - 1) {
2590 /* Store shifted data to use in next byte */
2591 tmp_s = data[src++] << disp;
2593 /* Add current (shifted) byte */
2594 buf[dst] |= tmp_s & 0xff;
2596 /* Handle mask if valid */
2598 tmp_m = (~data[mask++] & 0xff) << disp;
2599 dontcare[dst] |= tmp_m & 0xff;
2604 /* Fill in don't care bits at beginning of field */
2606 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2607 for (k = 0; k < disp; k++)
2608 dontcare[dst] |= BIT(k);
2611 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2613 /* Fill in don't care bits at end of field */
2615 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2616 info->entry.last - 1;
2617 for (k = end_disp; k < BITS_PER_BYTE; k++)
2618 dontcare[dst] |= BIT(k);
2623 * ice_flow_acl_frmt_entry - Format ACL entry
2624 * @hw: pointer to the hardware structure
2625 * @prof: pointer to flow profile
2626 * @e: pointer to the flow entry
2627 * @data: pointer to a data buffer containing flow entry's match values/masks
2628 * @acts: array of actions to be performed on a match
2629 * @acts_cnt: number of actions
2631 * Formats the key (and key_inverse) to be matched from the data passed in,
2632 * along with data from the flow profile. This key/key_inverse pair makes up
2633 * the 'entry' for an ACL flow entry.
2635 static enum ice_status
2636 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2637 struct ice_flow_entry *e, u8 *data,
2638 struct ice_flow_action *acts, u8 acts_cnt)
2640 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2641 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2642 enum ice_status status;
2647 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2651 /* Format the result action */
2653 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2657 status = ICE_ERR_NO_MEMORY;
 /* Keep a persistent copy of the actions in the entry */
2659 e->acts = (struct ice_flow_action *)
2660 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2661 ICE_NONDMA_TO_NONDMA);
2665 e->acts_cnt = acts_cnt;
2667 /* Format the matching data */
 /* Scenario width = size of one key's worth of match data */
2668 buf_sz = prof->cfg.scen->width;
2669 buf = (u8 *)ice_malloc(hw, buf_sz);
2673 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2677 /* 'key' buffer will store both key and key_inverse, so must be twice
2680 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2684 range_buf = (struct ice_aqc_acl_profile_ranges *)
2685 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2689 /* Set don't care mask to all 1's to start, will zero out used bytes */
2690 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2692 for (i = 0; i < prof->segs_cnt; i++) {
2693 struct ice_flow_seg_info *seg = &prof->segs[i];
2696 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2697 ICE_FLOW_FIELD_IDX_MAX) {
2698 struct ice_flow_fld_info *info = &seg->fields[j];
2700 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2701 ice_flow_acl_frmt_entry_range(j, info,
2705 ice_flow_acl_frmt_entry_fld(j, info, buf,
 /* Raw (offset-based) matches are copied byte-for-byte, no bit
  * displacement handling needed.
  */
2709 for (j = 0; j < seg->raws_cnt; j++) {
2710 struct ice_flow_fld_info *info = &seg->raws[j].info;
2711 u16 dst, src, mask, k;
2712 bool use_mask = false;
2714 src = info->src.val;
2715 dst = info->entry.val -
2716 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2717 mask = info->src.mask;
2719 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2722 for (k = 0; k < info->entry.last; k++, dst++) {
2723 buf[dst] = data[src++];
2725 dontcare[dst] = ~data[mask++];
 /* The profile ID byte must always match exactly */
2732 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2733 dontcare[prof->cfg.scen->pid_idx] = 0;
2735 /* Format the buffer for direction flags */
2736 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2738 if (prof->dir == ICE_FLOW_RX)
2739 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2742 buf[prof->cfg.scen->rng_chk_idx] = range;
2743 /* Mark any unused range checkers as don't care */
2744 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2745 e->range_buf = range_buf;
2747 ice_free(hw, range_buf);
2750 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2756 e->entry_sz = buf_sz * 2;
 /* Cleanup: scratch buffers are always freed; on error, also undo the
  * range buffer, action copy and any allocated counters.
  */
2763 ice_free(hw, dontcare);
2768 if (status && range_buf) {
2769 ice_free(hw, range_buf);
2770 e->range_buf = NULL;
2773 if (status && e->acts) {
2774 ice_free(hw, e->acts);
2779 if (status && cnt_alloc)
2780 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2786 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2787 * the compared data.
2788 * @prof: pointer to flow profile
2789 * @e: pointer to the comparing flow entry
2790 * @do_chg_action: decide if we want to change the ACL action
2791 * @do_add_entry: decide if we want to add the new ACL entry
2792 * @do_rem_entry: decide if we want to remove the current ACL entry
2794 * Find an ACL scenario entry that matches the compared data. In the same time,
2795 * this function also figure out:
2796 * a/ If we want to change the ACL action
2797 * b/ If we want to add the new ACL entry
2798 * c/ If we want to remove the current ACL entry
2800 static struct ice_flow_entry *
2801 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2802 struct ice_flow_entry *e, bool *do_chg_action,
2803 bool *do_add_entry, bool *do_rem_entry)
2805 struct ice_flow_entry *p, *return_entry = NULL;
2809 * a/ There exists an entry with same matching data, but different
2810 * priority, then we remove this existing ACL entry. Then, we
2811 * will add the new entry to the ACL scenario.
2812 * b/ There exists an entry with same matching data, priority, and
2813 * result action, then we do nothing
2814 * c/ There exists an entry with same matching data and priority, but
2815 * a different action; then only change that entry's action.
2816 * d/ Else, we add this new entry to the ACL scenario.
2818 *do_chg_action = false;
2819 *do_add_entry = true;
2820 *do_rem_entry = false;
2821 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2822 if (memcmp(p->entry, e->entry, p->entry_sz))
2825 /* From this point, we have the same matching_data. */
2826 *do_add_entry = false;
2829 if (p->priority != e->priority) {
2830 /* matching data && !priority */
2831 *do_add_entry = true;
2832 *do_rem_entry = true;
2836 /* From this point, we will have matching_data && priority */
2837 if (p->acts_cnt != e->acts_cnt)
2838 *do_chg_action = true;
 /* Compare action lists pairwise; any unmatched action forces an
  * action update on the existing entry.
  */
2839 for (i = 0; i < p->acts_cnt; i++) {
2840 bool found_not_match = false;
2842 for (j = 0; j < e->acts_cnt; j++)
2843 if (memcmp(&p->acts[i], &e->acts[j],
2844 sizeof(struct ice_flow_action))) {
2845 found_not_match = true;
2849 if (found_not_match) {
2850 *do_chg_action = true;
2855 /* (do_chg_action = true) means :
2856 * matching_data && priority && !result_action
2857 * (do_chg_action = false) means :
2858 * matching_data && priority && result_action
2863 return return_entry;
2867 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
 * @p: generic flow priority to map onto an ACL entry priority
 *
 * Unknown values default to ICE_ACL_PRIO_NORMAL.
2870 static enum ice_acl_entry_prio
2871 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2873 enum ice_acl_entry_prio acl_prio;
2876 case ICE_FLOW_PRIO_LOW:
2877 acl_prio = ICE_ACL_PRIO_LOW;
2879 case ICE_FLOW_PRIO_NORMAL:
2880 acl_prio = ICE_ACL_PRIO_NORMAL;
2882 case ICE_FLOW_PRIO_HIGH:
2883 acl_prio = ICE_ACL_PRIO_HIGH;
2886 acl_prio = ICE_ACL_PRIO_NORMAL;
2894 * ice_flow_acl_union_rng_chk - Perform union operation between two
2895 * range-range checker buffers
2896 * @dst_buf: pointer to destination range checker buffer
2897 * @src_buf: pointer to source range checker buffer
2899 * For this function, we do the union between dst_buf and src_buf
2900 * range checker buffer, and we will save the result back to dst_buf
 *
 * Returns ICE_ERR_MAX_LIMIT when a source config cannot be placed because
 * all destination slots are occupied by differing configurations.
2902 static enum ice_status
2903 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2904 struct ice_aqc_acl_profile_ranges *src_buf)
2908 if (!dst_buf || !src_buf)
2909 return ICE_ERR_BAD_PTR;
2911 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2912 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2913 bool will_populate = false;
2915 in_data = &src_buf->checker_cfg[i];
 /* Place each source config into the first destination slot that
  * is free (mask == 0) or already holds an identical config.
  */
2920 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2921 cfg_data = &dst_buf->checker_cfg[j];
2923 if (!cfg_data->mask ||
2924 !memcmp(cfg_data, in_data,
2925 sizeof(struct ice_acl_rng_data))) {
2926 will_populate = true;
2931 if (will_populate) {
2932 ice_memcpy(cfg_data, in_data,
2933 sizeof(struct ice_acl_rng_data),
2934 ICE_NONDMA_TO_NONDMA);
2936 /* No available slot left to program range checker */
2937 return ICE_ERR_MAX_LIMIT;
2945 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2946 * @hw: pointer to the hardware structure
2947 * @prof: pointer to flow profile
2948 * @entry: double pointer to the flow entry
2950 * For this function, we will look at the current added entries in the
2951 * corresponding ACL scenario. Then, we will perform matching logic to
2952 * see if we want to add/modify/do nothing with this new entry.
2954 static enum ice_status
2955 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2956 struct ice_flow_entry **entry)
2958 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2959 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2960 struct ice_acl_act_entry *acts = NULL;
2961 struct ice_flow_entry *exist;
2962 enum ice_status status = ICE_SUCCESS;
2963 struct ice_flow_entry *e;
2966 if (!entry || !(*entry) || !prof)
2967 return ICE_ERR_BAD_PTR;
2971 do_chg_rng_chk = false;
2975 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2980 /* Query the current range-checker value in FW */
2981 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2985 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2986 sizeof(struct ice_aqc_acl_profile_ranges),
2987 ICE_NONDMA_TO_NONDMA);
2989 /* Generate the new range-checker value */
2990 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2994 /* Reconfigure the range check if the buffer is changed. */
2995 do_chg_rng_chk = false;
2996 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2997 sizeof(struct ice_aqc_acl_profile_ranges))) {
2998 status = ice_prog_acl_prof_ranges(hw, prof_id,
2999 &cfg_rng_buf, NULL);
3003 do_chg_rng_chk = true;
3007 /* Figure out if we want to (change the ACL action) and/or
3008 * (Add the new ACL entry) and/or (Remove the current ACL entry)
3010 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3011 &do_add_entry, &do_rem_entry);
3013 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3018 /* Prepare the result action buffer */
 /* NOTE(review): allocates e->entry_sz elements but only e->acts_cnt
  * are populated below — looks oversized; confirm intended count.
  */
3019 acts = (struct ice_acl_act_entry *)
3020 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3022 return ICE_ERR_NO_MEMORY;
3024 for (i = 0; i < e->acts_cnt; i++)
3025 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3026 sizeof(struct ice_acl_act_entry),
3027 ICE_NONDMA_TO_NONDMA);
3030 enum ice_acl_entry_prio prio;
 /* First half of e->entry is the key, second half key_inverse */
3034 keys = (u8 *)e->entry;
3035 inverts = keys + (e->entry_sz / 2);
3036 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3038 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3039 inverts, acts, e->acts_cnt,
3044 e->scen_entry_idx = entry_idx;
3045 LIST_ADD(&e->l_entry, &prof->entries);
3047 if (do_chg_action) {
3048 /* For the action memory info, update the SW's copy of
3049 * exist entry with e's action memory info
3051 ice_free(hw, exist->acts);
3052 exist->acts_cnt = e->acts_cnt;
3053 exist->acts = (struct ice_flow_action *)
3054 ice_calloc(hw, exist->acts_cnt,
3055 sizeof(struct ice_flow_action));
3057 status = ICE_ERR_NO_MEMORY;
3061 ice_memcpy(exist->acts, e->acts,
3062 sizeof(struct ice_flow_action) * e->acts_cnt,
3063 ICE_NONDMA_TO_NONDMA);
3065 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3067 exist->scen_entry_idx);
3072 if (do_chg_rng_chk) {
3073 /* In this case, we want to update the range checker
3074 * information of the exist entry
3076 status = ice_flow_acl_union_rng_chk(exist->range_buf,
3082 /* As we don't add the new entry to our SW DB, deallocate its
3083 * memories, and return the exist entry to the caller
3085 ice_dealloc_flow_entry(hw, e);
3095 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3096 * @hw: pointer to the hardware structure
3097 * @prof: pointer to flow profile
3098 * @e: double pointer to the flow entry
static enum ice_status
ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
			    struct ice_flow_entry **e)
	enum ice_status status;

	/* Serialize entry add/modify against other updates on this profile */
	ice_acquire_lock(&prof->entries_lock);
	status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
	ice_release_lock(&prof->entries_lock);
3114 * ice_flow_add_entry - Add a flow entry
3115 * @hw: pointer to the HW struct
3116 * @blk: classification stage
3117 * @prof_id: ID of the profile to add a new flow entry to
3118 * @entry_id: unique ID to identify this flow entry
3119 * @vsi_handle: software VSI handle for the flow entry
3120 * @prio: priority of the flow entry
3121 * @data: pointer to a data buffer containing flow entry's match values/masks
3122 * @acts: arrays of actions to be performed on a match
3123 * @acts_cnt: number of actions
3124 * @entry_h: pointer to buffer that receives the new flow entry's handle
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	/* ACL entries must indicate an action */
	if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
		return ICE_ERR_PARAM;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Hold the per-block profile lock while looking up the profile */
	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
		status = ICE_ERR_DOES_NOT_EXIST;
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
			status = ICE_ERR_NO_MEMORY;
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
			ice_release_lock(&hw->fl_profs_locks[blk]);
	e->vsi_handle = vsi_handle;
		/* ACL will handle the entry management */
		status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
		status = ice_flow_acl_add_scen_entry(hw, prof, &e);
		/* Blocks other than the ones handled above are unsupported */
		status = ICE_ERR_NOT_IMPL;
	if (blk != ICE_BLK_ACL) {
		/* ACL will handle the entry management */
		ice_acquire_lock(&prof->entries_lock);
		LIST_ADD(&e->l_entry, &prof->entries);
		ice_release_lock(&prof->entries_lock);
	/* Hand an opaque handle for the new entry back to the caller */
	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
	ice_free(hw, e->entry);
3213 * ice_flow_rem_entry - Remove a flow entry
3214 * @hw: pointer to the HW struct
3215 * @blk: classification stage
3216 * @entry_h: handle to the flow entry to be removed
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return ICE_ERR_PARAM;

	/* Convert the opaque handle back to the flow entry pointer */
	entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);

	/* Retain the pointer to the flow profile as the entry will be freed */
	ice_acquire_lock(&prof->entries_lock);
	status = ice_flow_rem_entry_sync(hw, blk, entry);
	ice_release_lock(&prof->entries_lock);
3243 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3244 * @seg: packet segment the field being set belongs to
3245 * @fld: field to be set
3246 * @field_type: type of the field
3247 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3248 * entry's input buffer
3249 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3251 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3252 * entry's input buffer
3254 * This helper function stores information of a field being matched, including
3255 * the type of the field and the locations of the value to match, the mask, and
3256 * the upper-bound value in the start of the input buffer for a flow entry.
3257 * This function should only be used for fixed-size data structures.
3259 * This function also opportunistically determines the protocol headers to be
3260 * present based on the fields being set. Some fields cannot be used alone to
3261 * determine the protocol headers present. Sometimes, fields for particular
3262 * protocol headers are not matched. In those cases, the protocol headers
3263 * must be explicitly set.
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		     enum ice_flow_fld_match_type field_type, u16 val_loc,
		     u16 mask_loc, u16 last_loc)
	u64 bit = BIT_ULL(fld);

	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)

	/* Record where value/mask/last live in the entry's input buffer */
	seg->fields[fld].type = field_type;
	seg->fields[fld].src.val = val_loc;
	seg->fields[fld].src.mask = mask_loc;
	seg->fields[fld].src.last = last_loc;

	/* Mark the protocol header implied by this field as present */
	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3285 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3286 * @seg: packet segment the field being set belongs to
3287 * @fld: field to be set
3288 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3289 * entry's input buffer
3290 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3292 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3293 * entry's input buffer
3294 * @range: indicate if field being matched is to be in a range
3296 * This function specifies the locations, in the form of byte offsets from the
3297 * start of the input buffer for a flow entry, from where the value to match,
3298 * the mask value, and upper value can be extracted. These locations are then
3299 * stored in the flow profile. When adding a flow entry associated with the
3300 * flow profile, these locations will be used to quickly extract the values and
3301 * create the content of a match entry. This function should only be used for
3302 * fixed-size data structures.
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
	/* A ranged match uses the RANGE type; otherwise a regular match */
	enum ice_flow_fld_match_type t = range ?
		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;

	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3315 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3316 * @seg: packet segment the field being set belongs to
3317 * @fld: field to be set
3318 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3319 * entry's input buffer
3320 * @pref_loc: location of prefix value from entry's input buffer
3321 * @pref_sz: size of the location holding the prefix value
3323 * This function specifies the locations, in the form of byte offsets from the
3324 * start of the input buffer for a flow entry, from where the value to match
3325 * and the IPv4 prefix value can be extracted. These locations are then stored
3326 * in the flow profile. When adding flow entries to the associated flow profile,
3327 * these locations can be used to quickly extract the values to create the
3328 * content of a match entry. This function should only be used for fixed-size
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
			u16 val_loc, u16 pref_loc, u8 pref_sz)
	/* For this type of field, the "mask" location is for the prefix value's
	 * location and the "last" location is for the size of the location of
	ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
			     pref_loc, (u16)pref_sz);
3344 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3345 * @seg: packet segment the field being set belongs to
3346 * @off: offset of the raw field from the beginning of the segment in bytes
3347 * @len: length of the raw pattern to be matched
3348 * @val_loc: location of the value to match from entry's input buffer
3349 * @mask_loc: location of mask value from entry's input buffer
3351 * This function specifies the offset of the raw field to be match from the
3352 * beginning of the specified packet segment, and the locations, in the form of
3353 * byte offsets from the start of the input buffer for a flow entry, from where
3354 * the value to match and the mask value to be extracted. These locations are
3355 * then stored in the flow profile. When adding flow entries to the associated
3356 * flow profile, these locations can be used to quickly extract the values to
3357 * create the content of a match entry. This function should only be used for
3358 * fixed-size data structures.
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc)
	/* Only record the raw field while a slot remains in the array */
	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
		seg->raws[seg->raws_cnt].off = off;
		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
		seg->raws[seg->raws_cnt].info.src.val = val_loc;
		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
		/* The "last" field is used to store the length of the field */
		seg->raws[seg->raws_cnt].info.src.last = len;

	/* Overflows of "raws" will be handled as an error condition later in
	 * the flow when this information is processed.
3380 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3381 * @hw: pointer to the hardware structure
3382 * @blk: classification stage
3383 * @vsi_handle: software VSI handle
3384 * @prof_id: unique ID to identify this flow profile
3386 * This function removes the flow entries associated to the input
3387 * vsi handle and disassociates the vsi from the flow profile.
3389 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3392 struct ice_flow_prof *prof = NULL;
3393 enum ice_status status = ICE_SUCCESS;
3395 if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3396 return ICE_ERR_PARAM;
3398 /* find flow profile pointer with input package block and profile id */
3399 prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3401 ice_debug(hw, ICE_DBG_PKG,
3402 "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3403 return ICE_ERR_DOES_NOT_EXIST;
3406 /* Remove all remaining flow entries before removing the flow profile */
3407 if (!LIST_EMPTY(&prof->entries)) {
3408 struct ice_flow_entry *e, *t;
3410 ice_acquire_lock(&prof->entries_lock);
3411 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3413 if (e->vsi_handle != vsi_handle)
3416 status = ice_flow_rem_entry_sync(hw, blk, e);
3420 ice_release_lock(&prof->entries_lock);
3425 /* disassociate the flow profile from sw vsi handle */
3426 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3428 ice_debug(hw, ICE_DBG_PKG,
3429 "ice_flow_disassoc_prof() failed with status=%d\n",
3434 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3435 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3437 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3438 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3440 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3441 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3443 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3444 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3445 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3446 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3449 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3450 * @segs: pointer to the flow field segment(s)
3451 * @seg_cnt: segment count
3452 * @cfg: configure parameters
3454 * Helper function to extract fields from hash bitmap and use flow
3455 * header value to set flow field segment for further use in flow
3456 * profile entry or removal.
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
			  const struct ice_rss_hash_cfg *cfg)
	struct ice_flow_seg_info *seg;

	/* set inner most segment */
	seg = &segs[seg_cnt - 1];

	/* Translate each hashed-field bit into a match field on the segment */
	ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
			     ICE_FLOW_FIELD_IDX_MAX)
		ice_flow_set_fld(seg, (enum ice_flow_field)i,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);

	/* set outer most header */
	if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
			ICE_FLOW_SEG_HDR_IPV_FRAG |
			ICE_FLOW_SEG_HDR_IPV_OTHER;
	else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
			ICE_FLOW_SEG_HDR_IPV_FRAG |
			ICE_FLOW_SEG_HDR_IPV_OTHER;

	/* Reject header combinations outside the supported RSS set */
	if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
	    ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
	    ~ICE_FLOW_SEG_HDR_IPV_FRAG)
		return ICE_ERR_PARAM;

	/* At most one L3 header type may be selected (power-of-two check) */
	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
	if (val && !ice_is_pow2(val))

	/* Likewise at most one L4 header type */
	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
	if (val && !ice_is_pow2(val))
3504 * ice_rem_vsi_rss_list - remove VSI from RSS list
3505 * @hw: pointer to the hardware structure
3506 * @vsi_handle: software VSI handle
3508 * Remove the VSI from all RSS configurations in the list.
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
	struct ice_rss_cfg *r, *tmp;

	if (LIST_EMPTY(&hw->rss_list_head))

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry)
		/* Drop this VSI; free the config once no VSI references it */
		if (ice_test_and_clear_bit(vsi_handle, r->vsis))
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
	ice_release_lock(&hw->rss_locks);
3529 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3530 * @hw: pointer to the hardware structure
3531 * @vsi_handle: software VSI handle
3533 * This function will iterate through all flow profiles and disassociate
3534 * the VSI from that profile. If the flow profile has no VSIs it will
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (LIST_EMPTY(&hw->fl_profs[blk]))

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
		if (ice_is_bit_set(p->vsis, vsi_handle)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);

			/* Remove the profile once no VSI remains on it */
			if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
	ice_release_lock(&hw->rss_locks);
3569 * ice_get_rss_hdr_type - get a RSS profile's header type
3570 * @prof: RSS flow profile
static enum ice_rss_cfg_hdr_type
ice_get_rss_hdr_type(struct ice_flow_prof *prof)
	enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;

	if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
		/* Single segment: the profile describes outer headers only */
		hdr_type = ICE_RSS_OUTER_HEADERS;
	} else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
		/* Tunneled: classify by what the outer segment carries.
		 * Later checks take precedence over earlier ones.
		 */
		if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
			hdr_type = ICE_RSS_INNER_HEADERS;
		if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
			hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
		if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
			hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3592 * ice_rem_rss_list - remove RSS configuration from list
3593 * @hw: pointer to the hardware structure
3594 * @vsi_handle: software VSI handle
3595 * @prof: pointer to flow profile
3597 * Assumption: lock has already been acquired for RSS list
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
	enum ice_rss_cfg_hdr_type hdr_type;
	struct ice_rss_cfg *r, *tmp;

	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found
	 * remove from the RSS entry list of the VSI context and delete entry.
	hdr_type = ice_get_rss_hdr_type(prof);
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry)
		/* Match on the innermost segment's fields, headers and type */
		if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
		    r->hash.hdr_type == hdr_type) {
			ice_clear_bit(vsi_handle, r->vsis);
			/* Free the config once no VSI references it */
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
3625 * ice_add_rss_list - add RSS configuration to list
3626 * @hw: pointer to the hardware structure
3627 * @vsi_handle: software VSI handle
3628 * @prof: pointer to flow profile
3630 * Assumption: lock has already been acquired for RSS list
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
	enum ice_rss_cfg_hdr_type hdr_type;
	struct ice_rss_cfg *r, *rss_cfg;

	/* Reuse an existing matching configuration if one is in the list */
	hdr_type = ice_get_rss_hdr_type(prof);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry)
		if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
		    r->hash.hdr_type == hdr_type) {
			ice_set_bit(vsi_handle, r->vsis);

	/* No match found - create a new list entry from the profile's
	 * innermost segment.
	 */
	rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
		return ICE_ERR_NO_MEMORY;

	rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
	rss_cfg->hash.hdr_type = hdr_type;
	rss_cfg->hash.symm = prof->cfg.symm;
	ice_set_bit(vsi_handle, rss_cfg->vsis);

	LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3663 #define ICE_FLOW_PROF_HASH_S 0
3664 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3665 #define ICE_FLOW_PROF_HDR_S 32
3666 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3667 #define ICE_FLOW_PROF_ENCAP_S 62
3668 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3670 /* Flow profile ID format:
3671 * [0:31] - Packet match fields
3672 * [32:61] - Protocol header
3673 * [62:63] - Encapsulation flag:
3676 * 2 for tunneled with outer ipv4
3677 * 3 for tunneled with outer ipv6
3679 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3680 ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3681 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3682 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
	u32 s = ((src % 4) << 3); /* byte shift */
	u32 v = dst | 0x80; /* value to program */
	u8 i = src / 4; /* register index */

	/* Read-modify-write one byte of the GLQF_HSYMM register: clear the
	 * target byte lane, then program dst with the enable bit (0x80) set.
	 */
	reg = rd32(hw, GLQF_HSYMM(prof_id, i));
	reg = (reg & ~(0xff << s)) | (v << s);
	wr32(hw, GLQF_HSYMM(prof_id, i), reg);
ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
	/* Index of the last word of the field vector */
		ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;

	/* Program the pairing in both directions: src->dst and dst->src */
	for (i = 0; i < len; i++) {
		ice_rss_config_xor_word(hw, prof_id,
					/* Yes, field vector in GLQF_HSYMM and
					 * GLQF_HINSET is inversed!
					fv_last_word - (src + i),
					fv_last_word - (dst + i));
		ice_rss_config_xor_word(hw, prof_id,
					fv_last_word - (dst + i),
					fv_last_word - (src + i));
ice_rss_update_symm(struct ice_hw *hw,
		    struct ice_flow_prof *prof)
	struct ice_prof_map *map;

	/* Look up the HW profile ID mapped to this SW profile */
	ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
		prof_id = map->prof_id;
	ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);

	/* clear to default */
	for (m = 0; m < 6; m++)
		wr32(hw, GLQF_HSYMM(prof_id, m), 0);
	if (prof->cfg.symm) {
		/* Symmetric hashing: XOR the src/dst extract-sequence words
		 * of each supported L3/L4 field pair on the innermost segment.
		 */
		struct ice_flow_seg_info *seg =
			&prof->segs[prof->segs_cnt - 1];

		struct ice_flow_seg_xtrct *ipv4_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv4_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;

		struct ice_flow_seg_xtrct *tcp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *tcp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *udp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *udp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *sctp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *sctp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;

		/* xor IPv4 src/dst address words (only when both extracted) */
		if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv4_src->idx, ipv4_dst->idx, 2);

		/* xor IPv6 src/dst address words */
		if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv6_src->idx, ipv6_dst->idx, 8);

		/* xor TCP src/dst ports */
		if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   tcp_src->idx, tcp_dst->idx, 1);

		/* xor UDP src/dst ports */
		if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   udp_src->idx, udp_dst->idx, 1);

		/* xor SCTP src/dst ports */
		if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   sctp_src->idx, sctp_dst->idx, 1);
3790 * ice_add_rss_cfg_sync - add an RSS configuration
3791 * @hw: pointer to the hardware structure
3792 * @vsi_handle: software VSI handle
3793 * @cfg: configure parameters
3795 * Assumption: lock has already been acquired for RSS list
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	/* One segment for outer-only, the max when inner headers are used */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
		ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
		if (prof->cfg.symm == cfg->symm)
		/* Same profile, new symmetric setting: update in place */
		prof->cfg.symm = cfg->symm;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
			ice_rem_rss_list(hw, vsi_handle, prof);

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
		if (prof->cfg.symm == cfg->symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
				status = ice_add_rss_list(hw, vsi_handle,
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			status = ICE_ERR_NOT_SUPPORTED;

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(cfg->hash_flds,
						       segs[segs_cnt - 1].hdrs,
				   segs, segs_cnt, NULL, 0, &prof);

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
		ice_flow_rem_prof(hw, blk, prof->id);

	status = ice_add_rss_list(hw, vsi_handle, prof);
		/* Apply the symmetric setting and program GLQF_HSYMM */
		prof->cfg.symm = cfg->symm;
		ice_rss_update_symm(hw, prof);
3911 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3912 * @hw: pointer to the hardware structure
3913 * @vsi_handle: software VSI handle
3914 * @cfg: configure parameters
3916 * This function will generate a flow profile based on fields associated with
3917 * the input fields to hash on, the flow type and use the VSI number to add
3918 * a flow entry to the profile.
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
		const struct ice_rss_hash_cfg *cfg)
	struct ice_rss_hash_cfg local_cfg;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle) ||
	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
	    cfg->hash_flds == ICE_HASH_INVALID)
		return ICE_ERR_PARAM;

	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
		/* A specific header type: add that single configuration */
		ice_acquire_lock(&hw->rss_locks);
		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
		ice_release_lock(&hw->rss_locks);
		/* ICE_RSS_ANY_HEADERS: add for outer, then inner headers */
		ice_acquire_lock(&hw->rss_locks);
		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
			local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
		ice_release_lock(&hw->rss_locks);
3953 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3954 * @hw: pointer to the hardware structure
3955 * @vsi_handle: software VSI handle
3956 * @cfg: configure parameters
3958 * Assumption: lock has already been acquired for RSS list
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_seg_info *segs;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* One segment for outer-only, the max when inner headers are used */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
		ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);

	/* Find the profile matching these segments and hash fields */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
		status = ICE_ERR_DOES_NOT_EXIST;

	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);

	/* Remove RSS configuration from VSI context before deleting
	ice_rem_rss_list(hw, vsi_handle, prof);

	/* Delete the profile once no VSI remains associated with it */
	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
		status = ice_flow_rem_prof(hw, blk, prof->id);
4008 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4009 * @hw: pointer to the hardware structure
4010 * @vsi_handle: software VSI handle
4011 * @cfg: configure parameters
4013 * This function will lookup the flow profile based on the input
4014 * hash field bitmap, iterate through the profile entry list of
4015 * that profile and find entry associated with input VSI to be
4016 * removed. Calls are made to underlying flow apis which will in
4017 * turn build or update buffers for RSS XLT1 section.
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
		const struct ice_rss_hash_cfg *cfg)
	struct ice_rss_hash_cfg local_cfg;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle) ||
	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
	    cfg->hash_flds == ICE_HASH_INVALID)
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);

	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
		/* A specific header type: remove that single configuration */
		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
		/* ICE_RSS_ANY_HEADERS: remove outer, then inner configs */
		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);

			local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
			status = ice_rem_rss_cfg_sync(hw, vsi_handle,
	ice_release_lock(&hw->rss_locks);
4051 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4052 * @hw: pointer to the hardware structure
4053 * @vsi_handle: software VSI handle
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
	enum ice_status status = ICE_SUCCESS;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry) {
		/* Re-apply every stored config that references this VSI */
		if (ice_is_bit_set(r->vsis, vsi_handle)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
	ice_release_lock(&hw->rss_locks);
4078 * ice_get_rss_cfg - returns hashed fields for the given header types
4079 * @hw: pointer to the hardware structure
4080 * @vsi_handle: software VSI handle
4081 * @hdrs: protocol header type
4083 * This function will return the match fields of the first instance of flow
4084 * profile having the given header types and containing input VSI
4086 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4088 u64 rss_hash = ICE_HASH_INVALID;
4089 struct ice_rss_cfg *r;
4091 /* verify if the protocol header is non zero and VSI is valid */
4092 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4093 return ICE_HASH_INVALID;
4095 ice_acquire_lock(&hw->rss_locks);
4096 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4097 ice_rss_cfg, l_entry)
4098 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4099 r->hash.addl_hdrs == hdrs) {
4100 rss_hash = r->hash.hash_flds;
4103 ice_release_lock(&hw->rss_locks);