/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "ice_common.h"
/* Size of known protocol header fields, in bytes (the ICE_FLOW_FLD_INFO
 * initializers multiply these by BITS_PER_BYTE for storage)
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
/* IPv6 address prefixes of 32/48/64 bits */
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* Header the field belongs to */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field */

/* Initializer for an ice_flow_field_info entry without an explicit match
 * mask; offset and size are supplied in bytes and stored in bits.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \

/* Same as ICE_FLOW_FLD_INFO() but also takes a 16-bit match mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
/* Table containing properties of supported protocol header fields.
 * Indexed by the ICE_FLOW_FIELD_IDX_* value named in each entry's comment.
 */
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each array is cast to ice_bitmap_t and ANDed into a profile's ptype
 * bitmap in ice_flow_proc_seg_hdrs(); a set bit presumably selects one
 * packet type (ptype) index — confirm against the ice_bitmap_t layout.
 */

/* Packet types for packets with an Outer/First/Single MAC header */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
 * include IPv4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Outer/First/Single IPv4 header, includes
 * IPv4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos_all[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
 * include IPv6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00000000, 0x03F00000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Outer/First/Single IPv6 header, includes
 * IPv6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos_all[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ipv4_ofos_no_l4[] = {
	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x000cc000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00139800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x43000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x02300000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x4e600000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x90842000, 0x00000007,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with an Innermost/Last MAC header
 * (currently empty — no ptypes map here)
 */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPU */

/* GTPU ptypes carrying the PDU-session extension header attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },

/* Same ptypes, marked with the GTP downlink attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },

/* Same ptypes, marked with the GTP uplink attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
/* Packet types for GTPU */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for l2tpv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for esp */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for ah */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,

/* Packet types for non-IP packets with an Outer/First/Single MAC header
 * (per naming; NOTE(review) confirm against callers)
 */
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	u16 entry_length; /* # of bytes formatted entry will require */
	struct ice_flow_prof *prof;	/* profile being built */
	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];	/* extraction sequence */
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 mask[ICE_MAX_FV_WORDS];	/* per-word match masks */
	/* bitmap of packet types (ptypes) covered by this profile */
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Header kinds applied to the inner-most headers for RSS purposes
 * (NOTE(review): grouping inferred from the macro name — confirm at callers)
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* All supported L2 header kinds */
#define ICE_FLOW_SEG_HDRS_L2_MASK \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All supported L3 header kinds */
#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All supported L4 header kinds */
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 *
 * Rejects any segment whose header bitmap selects more than one L3
 * (IPv4/IPv6/ARP) or more than one L4 (ICMP/TCP/UDP/SCTP) header.
 */
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
	for (i = 0; i < segs_cnt; i++) {
		/* Multiple L3 headers: L3 mask intersection is not a power
		 * of two, i.e. more than one L3 bit is set
		 */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
			return ICE_ERR_PARAM;

		/* Multiple L4 headers: same power-of-two check on L4 bits */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
			return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 *
 * Sums the fixed (option-less) sizes of the headers selected in the
 * segment: L2 (MAC, or MAC+VLAN), then at most one L3, then at most one L4.
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
	/* L2 header: VLAN-tagged MAC or plain MAC */
	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;

	/* L3 header */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
		/* A L3 header is required if L4 is specified */
		/* NOTE(review): the statement adding the implied L3 header
		 * size appears to be missing here — confirm against the
		 * upstream source.
		 */

	/* L4 header */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
759 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
760 * @params: information about the flow to be processed
762 * This function identifies the packet types associated with the protocol
763 * headers being present in packet segments of the specified flow profile.
765 static enum ice_status
766 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
768 struct ice_flow_prof *prof;
/* Start from an all-ones ptype bitmap and narrow it by AND-ing with the
 * ptype bitmap of each protocol header requested per segment.
 */
771 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
776 for (i = 0; i < params->prof->segs_cnt; i++) {
777 const ice_bitmap_t *src;
780 hdrs = prof->segs[i].hdrs;
/* L2: "ofos" tables apply to the outermost segment (i == 0), "il"
 * (inner-layer) tables to subsequent segments.
 */
782 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
783 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
784 (const ice_bitmap_t *)ice_ptypes_mac_il;
785 ice_and_bitmap(params->ptypes, params->ptypes, src,
789 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
790 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
791 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* ARP is only supported in the outermost segment */
795 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
796 ice_and_bitmap(params->ptypes, params->ptypes,
797 (const ice_bitmap_t *)ice_ptypes_arp_of,
801 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
802 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
803 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* L3: select the IPv4/IPv6 ptype table based on whether IPV_OTHER is
 * requested and whether any explicit L4 header accompanies the L3 header.
 */
806 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
807 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
809 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all :
810 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
811 ice_and_bitmap(params->ptypes, params->ptypes, src,
813 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
814 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
816 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all :
817 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
818 ice_and_bitmap(params->ptypes, params->ptypes, src,
820 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
821 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
822 src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
823 (const ice_bitmap_t *)ice_ipv4_il_no_l4;
824 ice_and_bitmap(params->ptypes, params->ptypes, src,
826 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
827 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
828 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
829 ice_and_bitmap(params->ptypes, params->ptypes, src,
831 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
832 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
833 src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
834 (const ice_bitmap_t *)ice_ipv6_il_no_l4;
835 ice_and_bitmap(params->ptypes, params->ptypes, src,
837 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
838 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
839 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
840 ice_and_bitmap(params->ptypes, params->ptypes, src,
844 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
845 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
846 ice_and_bitmap(params->ptypes, params->ptypes,
847 src, ICE_FLOW_PTYPE_MAX);
848 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
849 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
850 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* Not PPPoE: explicitly exclude (AND-NOT) the PPPoE ptypes */
853 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
854 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
/* L4: UDP / TCP / SCTP are mutually exclusive selections */
858 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
859 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
860 ice_and_bitmap(params->ptypes, params->ptypes, src,
862 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
863 ice_and_bitmap(params->ptypes, params->ptypes,
864 (const ice_bitmap_t *)ice_ptypes_tcp_il,
866 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
867 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
868 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* Tunnel / special protocols; GTP-U variants also set the attribute
 * table that distinguishes downlink/uplink/extension-header packets.
 */
872 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
873 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
874 (const ice_bitmap_t *)ice_ptypes_icmp_il;
875 ice_and_bitmap(params->ptypes, params->ptypes, src,
877 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
879 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
880 ice_and_bitmap(params->ptypes, params->ptypes,
881 src, ICE_FLOW_PTYPE_MAX);
883 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
884 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
885 ice_and_bitmap(params->ptypes, params->ptypes,
886 src, ICE_FLOW_PTYPE_MAX);
887 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
888 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
889 ice_and_bitmap(params->ptypes, params->ptypes,
890 src, ICE_FLOW_PTYPE_MAX);
891 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
892 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
893 ice_and_bitmap(params->ptypes, params->ptypes,
894 src, ICE_FLOW_PTYPE_MAX);
896 /* Attributes for GTP packet with downlink */
897 params->attr = ice_attr_gtpu_down;
898 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
899 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
900 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
901 ice_and_bitmap(params->ptypes, params->ptypes,
902 src, ICE_FLOW_PTYPE_MAX);
904 /* Attributes for GTP packet with uplink */
905 params->attr = ice_attr_gtpu_up;
906 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
907 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
908 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
909 ice_and_bitmap(params->ptypes, params->ptypes,
910 src, ICE_FLOW_PTYPE_MAX);
912 /* Attributes for GTP packet with Extension Header */
913 params->attr = ice_attr_gtpu_eh;
914 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
915 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
916 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
917 ice_and_bitmap(params->ptypes, params->ptypes,
918 src, ICE_FLOW_PTYPE_MAX);
919 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
920 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
921 ice_and_bitmap(params->ptypes, params->ptypes,
922 src, ICE_FLOW_PTYPE_MAX);
923 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
924 src = (const ice_bitmap_t *)ice_ptypes_esp;
925 ice_and_bitmap(params->ptypes, params->ptypes,
926 src, ICE_FLOW_PTYPE_MAX);
927 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
928 src = (const ice_bitmap_t *)ice_ptypes_ah;
929 ice_and_bitmap(params->ptypes, params->ptypes,
930 src, ICE_FLOW_PTYPE_MAX);
931 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
932 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
933 ice_and_bitmap(params->ptypes, params->ptypes,
934 src, ICE_FLOW_PTYPE_MAX);
/* PFCP: pick node vs. session ptypes when requested; otherwise
 * exclude both PFCP ptype sets from the result.
 */
937 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
938 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
940 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
943 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
945 ice_and_bitmap(params->ptypes, params->ptypes,
946 src, ICE_FLOW_PTYPE_MAX);
948 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
949 ice_andnot_bitmap(params->ptypes, params->ptypes,
950 src, ICE_FLOW_PTYPE_MAX);
952 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
953 ice_andnot_bitmap(params->ptypes, params->ptypes,
954 src, ICE_FLOW_PTYPE_MAX);
962 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
963 * @hw: pointer to the HW struct
964 * @params: information about the flow to be processed
965 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
967 * This function will allocate an extraction sequence entry for a DWORD size
968 * chunk of the packet flags.
970 static enum ice_status
971 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
972 struct ice_flow_prof_params *params,
973 enum ice_flex_mdid_pkt_flags flags)
975 u8 fv_words = hw->blk[params->blk].es.fvw;
978 /* Make sure the number of extraction sequence entries required does not
979 * exceed the block's capacity.
981 if (params->es_cnt >= fv_words)
982 return ICE_ERR_MAX_LIMIT;
984 /* some blocks require a reversed field vector layout */
985 if (hw->blk[params->blk].es.reverse)
986 idx = fv_words - params->es_cnt - 1;
988 idx = params->es_cnt;
/* Metadata (packet flags) are extracted via the META_ID pseudo-protocol;
 * the "offset" encodes which pkt_flags chunk to pull.
 */
990 params->es[idx].prot_id = ICE_PROT_META_ID;
991 params->es[idx].off = flags;
998 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
999 * @hw: pointer to the HW struct
1000 * @params: information about the flow to be processed
1001 * @seg: packet segment index of the field to be extracted
1002 * @fld: ID of field to be extracted
1003 * @match: bitfield of all fields
1005 * This function determines the protocol ID, offset, and size of the given
1006 * field. It then allocates one or more extraction sequence entries for the
1007 * given field, and fills the entries with protocol ID and offset information.
1009 static enum ice_status
1010 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1011 u8 seg, enum ice_flow_field fld, u64 match)
1013 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1014 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1015 u8 fv_words = hw->blk[params->blk].es.fvw;
1016 struct ice_flow_fld_info *flds;
1017 u16 cnt, ese_bits, i;
1022 flds = params->prof->segs[seg].fields;
/* Map the abstract flow field to the HW protocol ID. Outer-segment
 * (seg == 0) fields use the OF/OS protocol IDs, inner use the IL ones.
 */
1025 case ICE_FLOW_FIELD_IDX_ETH_DA:
1026 case ICE_FLOW_FIELD_IDX_ETH_SA:
1027 case ICE_FLOW_FIELD_IDX_S_VLAN:
1028 case ICE_FLOW_FIELD_IDX_C_VLAN:
1029 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1031 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1032 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1034 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1035 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1037 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1038 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1040 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1041 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1042 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1044 /* TTL and PROT share the same extraction seq. entry.
1045 * Each is considered a sibling to the other in terms of sharing
1046 * the same extraction sequence entry.
1048 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1049 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1050 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
1051 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1053 /* If the sibling field is also included, that field's
1054 * mask needs to be included.
1056 if (match & BIT(sib))
1057 sib_mask = ice_flds_info[sib].mask;
1059 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1060 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1061 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1063 /* TTL and PROT share the same extraction seq. entry.
1064 * Each is considered a sibling to the other in terms of sharing
1065 * the same extraction sequence entry.
1067 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1068 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1069 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
1070 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1072 /* If the sibling field is also included, that field's
1073 * mask needs to be included.
1075 if (match & BIT(sib))
1076 sib_mask = ice_flds_info[sib].mask;
1078 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1079 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1080 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1082 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1083 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1084 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1085 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1086 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1087 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1088 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1089 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1090 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1092 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1093 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1094 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1095 prot_id = ICE_PROT_TCP_IL;
1097 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1098 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1099 prot_id = ICE_PROT_UDP_IL_OR_S;
1101 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1102 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1103 prot_id = ICE_PROT_SCTP_IL;
1105 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1106 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1107 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1108 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1109 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1110 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1111 /* GTP is accessed through UDP OF protocol */
1112 prot_id = ICE_PROT_UDP_OF;
1114 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1115 prot_id = ICE_PROT_PPPOE;
1117 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1118 prot_id = ICE_PROT_UDP_IL_OR_S;
1120 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1121 prot_id = ICE_PROT_L2TPV3;
1123 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1124 prot_id = ICE_PROT_ESP_F;
1126 case ICE_FLOW_FIELD_IDX_AH_SPI:
1127 prot_id = ICE_PROT_ESP_2;
1129 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1130 prot_id = ICE_PROT_UDP_IL_OR_S;
1132 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1133 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1134 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1135 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1136 case ICE_FLOW_FIELD_IDX_ARP_OP:
1137 prot_id = ICE_PROT_ARP_OF;
1139 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1140 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1141 /* ICMP type and code share the same extraction seq. entry */
1142 prot_id = (params->prof->segs[seg].hdrs &
1143 ICE_FLOW_SEG_HDR_IPV4) ?
1144 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1145 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1146 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1147 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1149 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1150 prot_id = ICE_PROT_GRE_OF;
1153 return ICE_ERR_NOT_IMPL;
1156 /* Each extraction sequence entry is a word in size, and extracts a
1157 * word-aligned offset from a protocol header.
1159 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1161 flds[fld].xtrct.prot_id = prot_id;
/* Round the field's bit offset down to a word boundary; "disp" records
 * the residual bit displacement within that word.
 */
1162 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1163 ICE_FLOW_FV_EXTRACT_SZ;
1164 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1165 flds[fld].xtrct.idx = params->es_cnt;
1166 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1168 /* Adjust the next field-entry index after accommodating the number of
1169 * entries this field consumes
1171 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1172 ice_flds_info[fld].size, ese_bits);
1174 /* Fill in the extraction sequence entries needed for this field */
1175 off = flds[fld].xtrct.off;
1176 mask = flds[fld].xtrct.mask;
1177 for (i = 0; i < cnt; i++) {
1178 /* Only consume an extraction sequence entry if there is no
1179 * sibling field associated with this field or the sibling entry
1180 * already extracts the word shared with this field.
1182 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1183 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1184 flds[sib].xtrct.off != off) {
1187 /* Make sure the number of extraction sequence required
1188 * does not exceed the block's capability
1190 if (params->es_cnt >= fv_words)
1191 return ICE_ERR_MAX_LIMIT;
1193 /* some blocks require a reversed field vector layout */
1194 if (hw->blk[params->blk].es.reverse)
1195 idx = fv_words - params->es_cnt - 1;
1197 idx = params->es_cnt;
1199 params->es[idx].prot_id = prot_id;
1200 params->es[idx].off = off;
/* The sibling's mask is OR-ed in so both fields sharing
 * this word are matched correctly.
 */
1201 params->mask[idx] = mask | sib_mask;
1205 off += ICE_FLOW_FV_EXTRACT_SZ;
1212 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1213 * @hw: pointer to the HW struct
1214 * @params: information about the flow to be processed
1215 * @seg: index of packet segment whose raw fields are to be extracted
1217 static enum ice_status
1218 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1225 if (!params->prof->segs[seg].raws_cnt)
1228 if (params->prof->segs[seg].raws_cnt >
1229 ARRAY_SIZE(params->prof->segs[seg].raws))
1230 return ICE_ERR_MAX_LIMIT;
1232 /* Offsets within the segment headers are not supported */
1233 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1235 return ICE_ERR_PARAM;
1237 fv_words = hw->blk[params->blk].es.fvw;
1239 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1240 struct ice_flow_seg_fld_raw *raw;
/* Fixed mis-encoded "&params" (was corrupted to a pilcrow entity) */
1243 raw = &params->prof->segs[seg].raws[i];
1245 /* Storing extraction information */
1246 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
/* Word-align the raw byte offset; "disp" keeps the residual bit shift */
1247 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1248 ICE_FLOW_FV_EXTRACT_SZ;
1249 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1251 raw->info.xtrct.idx = params->es_cnt;
1253 /* Determine the number of field vector entries this raw field
1256 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1257 (raw->info.src.last * BITS_PER_BYTE),
1258 (ICE_FLOW_FV_EXTRACT_SZ *
1260 off = raw->info.xtrct.off;
1261 for (j = 0; j < cnt; j++) {
1264 /* Make sure the number of extraction sequence required
1265 * does not exceed the block's capability
1267 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1268 params->es_cnt >= ICE_MAX_FV_WORDS)
1269 return ICE_ERR_MAX_LIMIT;
1271 /* some blocks require a reversed field vector layout */
1272 if (hw->blk[params->blk].es.reverse)
1273 idx = fv_words - params->es_cnt - 1;
1275 idx = params->es_cnt;
1277 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1278 params->es[idx].off = off;
1280 off += ICE_FLOW_FV_EXTRACT_SZ;
1288 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1289 * @hw: pointer to the HW struct
1290 * @params: information about the flow to be processed
1292 * This function iterates through all matched fields in the given segments, and
1293 * creates an extraction sequence for the fields.
1295 static enum ice_status
1296 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1297 struct ice_flow_prof_params *params)
1299 enum ice_status status = ICE_SUCCESS;
1302 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1305 if (params->blk == ICE_BLK_ACL) {
1306 status = ice_flow_xtract_pkt_flags(hw, params,
1307 ICE_RX_MDID_PKT_FLAGS_15_0);
/* Walk each segment's 64-bit "match" field bitmap; the loop also stops
 * early once every set bit has been consumed (the "&& match" guard).
 */
1312 for (i = 0; i < params->prof->segs_cnt; i++) {
1313 u64 match = params->prof->segs[i].match;
1314 enum ice_flow_field j;
1316 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1317 const u64 bit = BIT_ULL(j);
1320 status = ice_flow_xtract_fld(hw, params, i, j,
1328 /* Process raw matching bytes */
1329 status = ice_flow_xtract_raws(hw, params, i);
1338 * ice_flow_sel_acl_scen - returns the specific scenario
1339 * @hw: pointer to the hardware structure
1340 * @params: information about the flow to be processed
1342 * This function will return the specific scenario based on the
1343 * params passed to it
1345 static enum ice_status
1346 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1348 /* Find the best-fit scenario for the provided match width */
1349 struct ice_acl_scen *cand_scen = NULL, *scen;
1352 return ICE_ERR_DOES_NOT_EXIST;
1354 /* Loop through each scenario and match against the scenario width
1355 * to select the specific scenario
/* Best fit = narrowest scenario whose effective width still holds the
 * computed entry length.
 */
1357 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1358 if (scen->eff_width >= params->entry_length &&
1359 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1362 return ICE_ERR_DOES_NOT_EXIST;
1364 params->prof->cfg.scen = cand_scen;
1370 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1371 * @params: information about the flow to be processed
1373 static enum ice_status
1374 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1376 u16 index, i, range_idx = 0;
1378 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1380 for (i = 0; i < params->prof->segs_cnt; i++) {
/* Fixed mis-encoded "&params" (was corrupted to a pilcrow entity) */
1381 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1382 u64 match = seg->match;
1385 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1386 struct ice_flow_fld_info *fld;
1387 const u64 bit = BIT_ULL(j);
1392 fld = &seg->fields[j];
1393 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1395 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1396 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1398 /* Range checking only supported for single
1401 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1403 BITS_PER_BYTE * 2) > 1)
1404 return ICE_ERR_PARAM;
1406 /* Ranges must define low and high values */
1407 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1408 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1409 return ICE_ERR_PARAM;
/* Range fields consume a range-checker slot, not entry bytes */
1411 fld->entry.val = range_idx++;
1413 /* Store adjusted byte-length of field for later
1414 * use, taking into account potential
1415 * non-byte-aligned displacement
1417 fld->entry.last = DIVIDE_AND_ROUND_UP
1418 (ice_flds_info[j].size +
1419 (fld->xtrct.disp % BITS_PER_BYTE),
1421 fld->entry.val = index;
1422 index += fld->entry.last;
/* Raw fields always consume entry bytes after the named fields */
1428 for (j = 0; j < seg->raws_cnt; j++) {
1429 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1431 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1432 raw->info.entry.val = index;
1433 raw->info.entry.last = raw->info.src.last;
1434 index += raw->info.entry.last;
1438 /* Currently only support using the byte selection base, which only
1439 * allows for an effective entry size of 30 bytes. Reject anything
1442 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1443 return ICE_ERR_PARAM;
1445 /* Only 8 range checkers per profile, reject anything trying to use
1448 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1449 return ICE_ERR_PARAM;
1451 /* Store # bytes required for entry for later use */
1452 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1458 * ice_flow_proc_segs - process all packet segments associated with a profile
1459 * @hw: pointer to the HW struct
1460 * @params: information about the flow to be processed
1462 static enum ice_status
1463 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1465 enum ice_status status;
/* First resolve ptypes, then build the extraction sequence, then apply
 * block-specific post-processing (ACL entry layout + scenario selection).
 */
1467 status = ice_flow_proc_seg_hdrs(params);
1471 status = ice_flow_create_xtrct_seq(hw, params);
1475 switch (params->blk) {
1478 status = ICE_SUCCESS;
1481 status = ice_flow_acl_def_entry_frmt(params);
1484 status = ice_flow_sel_acl_scen(hw, params);
1489 return ICE_ERR_NOT_IMPL;
/* Condition flags for the "conds" parameter of ice_flow_find_prof_conds() */
1495 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1496 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1497 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1500 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1501 * @hw: pointer to the HW struct
1502 * @blk: classification stage
1503 * @dir: flow direction
1504 * @segs: array of one or more packet segments that describe the flow
1505 * @segs_cnt: number of packet segments provided
1506 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1507 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 *
 * Returns the matching flow profile, or NULL when none matches. The profile
 * list lock for the block is held for the duration of the search.
1509 static struct ice_flow_prof *
1510 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1511 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1512 u8 segs_cnt, u16 vsi_handle, u32 conds)
1514 struct ice_flow_prof *p, *prof = NULL;
1516 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1517 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1518 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1519 segs_cnt && segs_cnt == p->segs_cnt) {
1522 /* Check for profile-VSI association if specified */
1523 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1524 ice_is_vsi_valid(hw, vsi_handle) &&
1525 !ice_is_bit_set(p->vsis, vsi_handle))
1528 /* Protocol headers must be checked. Matched fields are
1529 * checked if specified.
1531 for (i = 0; i < segs_cnt; i++)
1532 if (segs[i].hdrs != p->segs[i].hdrs ||
1533 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1534 segs[i].match != p->segs[i].match))
1537 /* A match is found if all segments are matched */
1538 if (i == segs_cnt) {
1543 ice_release_lock(&hw->fl_profs_locks[blk]);
1549 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1550 * @hw: pointer to the HW struct
1551 * @blk: classification stage
1552 * @dir: flow direction
1553 * @segs: array of one or more packet segments that describe the flow
1554 * @segs_cnt: number of packet segments provided
 *
 * Returns the profile ID, or ICE_FLOW_PROF_ID_INVAL when no profile matches.
1557 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1558 struct ice_flow_seg_info *segs, u8 segs_cnt)
1560 struct ice_flow_prof *p;
/* Field-level matching is always required for this wrapper; no VSI check */
1562 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1563 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1565 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1569 * ice_flow_find_prof_id - Look up a profile with given profile ID
1570 * @hw: pointer to the HW struct
1571 * @blk: classification stage
1572 * @prof_id: unique ID to identify this flow profile
 *
 * Linear search of the block's profile list; returns the profile or NULL.
 * NOTE(review): unlike ice_flow_find_prof_conds(), no list lock is taken
 * here — callers presumably hold it; confirm against call sites.
1574 static struct ice_flow_prof *
1575 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1577 struct ice_flow_prof *p;
1579 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1580 if (p->id == prof_id)
1587 * ice_dealloc_flow_entry - Deallocate flow entry memory
1588 * @hw: pointer to the HW struct
1589 * @entry: flow entry to be removed
 *
 * Frees the entry's buffers (entry data, range buffer, actions) and finally
 * the entry itself. Pointers cleared along the way guard against reuse.
1592 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1598 ice_free(hw, entry->entry);
1600 if (entry->range_buf) {
1601 ice_free(hw, entry->range_buf);
1602 entry->range_buf = NULL;
1606 ice_free(hw, entry->acts);
1608 entry->acts_cnt = 0;
1611 ice_free(hw, entry);
1614 #define ICE_ACL_INVALID_SCEN 0x3f
1617 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1618 * @hw: pointer to the hardware structure
1619 * @prof: pointer to flow profile
1620 * @buf: destination buffer function writes partial extraction sequence to
1622 * returns ICE_SUCCESS if no PF is associated to the given profile
1623 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1624 * returns other error code for real error
1626 static enum ice_status
1627 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1628 struct ice_aqc_acl_prof_generic_frmt *buf)
1630 enum ice_status status;
1633 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1637 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1641 /* If all PF's associated scenarios are all 0 or all
1642 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1643 * not been configured yet.
/* All-zero: never configured */
1645 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1646 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1647 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1648 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
/* All-invalid: every PF has disassociated from the profile */
1651 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1652 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1653 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1654 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1655 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1656 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1657 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1658 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1661 return ICE_ERR_IN_USE;
1665 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1666 * @hw: pointer to the hardware structure
1667 * @acts: array of actions to be performed on a match
1668 * @acts_cnt: number of actions
 *
 * Releases any ACL counters that the counter-type actions had allocated.
1670 static enum ice_status
1671 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1676 for (i = 0; i < acts_cnt; i++) {
1677 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1678 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1679 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1680 struct ice_acl_cntrs cntrs;
1681 enum ice_status status;
1683 cntrs.bank = 0; /* Only bank0 for the moment */
/* The action's LE16 value encodes the counter index range */
1685 LE16_TO_CPU(acts[i].data.acl_act.value);
1687 LE16_TO_CPU(acts[i].data.acl_act.value);
1689 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1690 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1692 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1694 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1703 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1704 * @hw: pointer to the hardware structure
1705 * @prof: pointer to flow profile
1707 * Disassociate the scenario from the profile for the PF of the VSI.
1709 static enum ice_status
1710 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1712 struct ice_aqc_acl_prof_generic_frmt buf;
1713 enum ice_status status = ICE_SUCCESS;
1716 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
/* Read-modify-write of the profile's per-PF scenario table */
1718 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1722 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1726 /* Clear scenario for this PF */
1727 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1728 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1734 * ice_flow_rem_entry_sync - Remove a flow entry
1735 * @hw: pointer to the HW struct
1736 * @blk: classification stage
1737 * @entry: flow entry to be removed
 *
 * Removes the entry from HW (ACL only), releases its counters, unlinks it
 * from the profile's entry list, and frees its memory.
1739 static enum ice_status
1740 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1741 struct ice_flow_entry *entry)
1744 return ICE_ERR_BAD_PTR;
1746 if (blk == ICE_BLK_ACL) {
1747 enum ice_status status;
1750 return ICE_ERR_BAD_PTR;
1752 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1753 entry->scen_entry_idx);
1757 /* Checks if we need to release an ACL counter. */
1758 if (entry->acts_cnt && entry->acts)
1759 ice_flow_acl_free_act_cntr(hw, entry->acts,
1763 LIST_DEL(&entry->l_entry);
1765 ice_dealloc_flow_entry(hw, entry);
1771 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1772 * @hw: pointer to the HW struct
1773 * @blk: classification stage
1774 * @dir: flow direction
1775 * @prof_id: unique ID to identify this flow profile
1776 * @segs: array of one or more packet segments that describe the flow
1777 * @segs_cnt: number of packet segments provided
1778 * @acts: array of default actions
1779 * @acts_cnt: number of default actions
1780 * @prof: stores the returned flow profile added
1782 * Assumption: the caller has acquired the lock to the profile list
1784 static enum ice_status
1785 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1786 enum ice_flow_dir dir, u64 prof_id,
1787 struct ice_flow_seg_info *segs, u8 segs_cnt,
1788 struct ice_flow_action *acts, u8 acts_cnt,
1789 struct ice_flow_prof **prof)
1791 struct ice_flow_prof_params params;
1792 enum ice_status status;
1795 if (!prof || (acts_cnt && !acts))
1796 return ICE_ERR_BAD_PTR;
/* Fixed mis-encoded "&params" (was corrupted to a pilcrow entity) on
 * the ice_memset/ice_memcpy/proc_segs/INIT_LIST_HEAD/init_lock lines.
 */
1798 ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1799 params.prof = (struct ice_flow_prof *)
1800 ice_malloc(hw, sizeof(*params.prof));
1802 return ICE_ERR_NO_MEMORY;
1804 /* initialize extraction sequence to all invalid (0xff) */
1805 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1806 params.es[i].prot_id = ICE_PROT_INVALID;
1807 params.es[i].off = ICE_FV_OFFSET_INVAL;
1811 params.prof->id = prof_id;
1812 params.prof->dir = dir;
1813 params.prof->segs_cnt = segs_cnt;
1815 /* Make a copy of the segments that need to be persistent in the flow
1818 for (i = 0; i < segs_cnt; i++)
1819 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1820 ICE_NONDMA_TO_NONDMA);
1822 /* Make a copy of the actions that need to be persistent in the flow
1826 params.prof->acts = (struct ice_flow_action *)
1827 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1828 ICE_NONDMA_TO_NONDMA);
1830 if (!params.prof->acts) {
1831 status = ICE_ERR_NO_MEMORY;
1836 status = ice_flow_proc_segs(hw, &params);
1838 ice_debug(hw, ICE_DBG_FLOW,
1839 "Error processing a flow's packet segments\n");
1843 /* Add a HW profile for this flow profile */
1844 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1845 params.attr, params.attr_cnt, params.es,
1848 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1852 INIT_LIST_HEAD(&params.prof->entries);
1853 ice_init_lock(&params.prof->entries_lock);
1854 *prof = params.prof;
/* Error path: release the partially-built profile and its actions */
1858 if (params.prof->acts)
1859 ice_free(hw, params.prof->acts);
1860 ice_free(hw, params.prof);
1867 * ice_flow_rem_prof_sync - remove a flow profile
1868 * @hw: pointer to the hardware structure
1869 * @blk: classification stage
1870 * @prof: pointer to flow profile to remove
1872 * Assumption: the caller has acquired the lock to the profile list
1874 static enum ice_status
1875 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1876 struct ice_flow_prof *prof)
1878 enum ice_status status;
1880 /* Remove all remaining flow entries before removing the flow profile */
1881 if (!LIST_EMPTY(&prof->entries)) {
1882 struct ice_flow_entry *e, *t;
1884 ice_acquire_lock(&prof->entries_lock);
1886 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1888 status = ice_flow_rem_entry_sync(hw, blk, e);
1893 ice_release_lock(&prof->entries_lock);
1896 if (blk == ICE_BLK_ACL) {
1897 struct ice_aqc_acl_profile_ranges query_rng_buf;
1898 struct ice_aqc_acl_prof_generic_frmt buf;
1901 /* Disassociate the scenario from the profile for the PF */
1902 status = ice_flow_acl_disassoc_scen(hw, prof);
1906 /* Clear the range-checker if the profile ID is no longer
1909 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
/* ICE_ERR_IN_USE means another PF still uses the profile; that is
 * not an error here — only real failures propagate.
 */
1910 if (status && status != ICE_ERR_IN_USE) {
1912 } else if (!status) {
1913 /* Clear the range-checker value for profile ID */
1914 ice_memset(&query_rng_buf, 0,
1915 sizeof(struct ice_aqc_acl_profile_ranges),
1918 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1923 status = ice_prog_acl_prof_ranges(hw, prof_id,
1924 &query_rng_buf, NULL);
1930 /* Remove all hardware profiles associated with this flow profile */
1931 status = ice_rem_prof(hw, blk, prof->id);
1933 LIST_DEL(&prof->l_entry);
1934 ice_destroy_lock(&prof->entries_lock);
1936 ice_free(hw, prof->acts);
1944 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1945 * @buf: Destination buffer function writes partial xtrct sequence to
1946 * @info: Info about field
1949 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1950 struct ice_flow_fld_info *info)
1955 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1956 info->xtrct.disp / BITS_PER_BYTE;
1957 dst = info->entry.val;
1958 for (i = 0; i < info->entry.last; i++)
1959 /* HW stores field vector words in LE, convert words back to BE
1960 * so constructed entries will end up in network order
1962 buf->byte_selection[dst++] = src++ ^ 1;
1966 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1967 * @hw: pointer to the hardware structure
1968 * @prof: pointer to flow profile
/* NOTE(review): extraction dropped lines in this function (declarations of
 * prof_id/i/j, early-return branches after the status checks, closing braces).
 */
1970 static enum ice_status
1971 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1973 struct ice_aqc_acl_prof_generic_frmt buf;
1974 struct ice_flow_fld_info *info;
1975 enum ice_status status;
1979 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1981 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* ICE_ERR_IN_USE here means another PF already programmed the extraction
 * sequence for this profile -- presumably only the per-PF scenario number
 * is updated in that case; the skip branch is elided. TODO confirm.
 */
1985 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1986 if (status && status != ICE_ERR_IN_USE)
1990 /* Program the profile dependent configuration. This is done
1991 * only once regardless of the number of PFs using that profile
1993 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1995 for (i = 0; i < prof->segs_cnt; i++) {
1996 struct ice_flow_seg_info *seg = &prof->segs[i];
1997 u64 match = seg->match;
2000 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2001 const u64 bit = BIT_ULL(j);
2006 info = &seg->fields[j];
/* Range-matched fields are programmed through the word selection; all other
 * fields go through the byte-selection extraction sequence.
 */
2008 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2009 buf.word_selection[info->entry.val] =
2012 ice_flow_acl_set_xtrct_seq_fld(&buf,
2018 for (j = 0; j < seg->raws_cnt; j++) {
2019 info = &seg->raws[j].info;
2020 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Mark every PF's scenario slot invalid first; only the current PF's slot
 * is then filled in below.
 */
2024 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2025 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2029 /* Update the current PF */
2030 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2031 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
2037 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2038 * @hw: pointer to the hardware structure
2039 * @blk: classification stage
2040 * @vsi_handle: software VSI handle
2041 * @vsig: target VSI group
2043 * Assumption: the caller has already verified that the VSI to
2044 * be added has the same characteristics as the VSIG and will
2045 * thereby have access to all resources added to that VSIG.
2048 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2051 enum ice_status status;
2053 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2054 return ICE_ERR_PARAM;
2056 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2057 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2059 ice_release_lock(&hw->fl_profs_locks[blk]);
2065 * ice_flow_assoc_prof - associate a VSI with a flow profile
2066 * @hw: pointer to the hardware structure
2067 * @blk: classification stage
2068 * @prof: pointer to flow profile
2069 * @vsi_handle: software VSI handle
2071 * Assumption: the caller has acquired the lock to the profile list
2072 * and the software VSI handle has been validated
2074 static enum ice_status
2075 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2076 struct ice_flow_prof *prof, u16 vsi_handle)
2078 enum ice_status status = ICE_SUCCESS;
2080 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2081 if (blk == ICE_BLK_ACL) {
2082 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2086 status = ice_add_prof_id_flow(hw, blk,
2087 ice_get_hw_vsi_num(hw,
2091 ice_set_bit(vsi_handle, prof->vsis);
2093 ice_debug(hw, ICE_DBG_FLOW,
2094 "HW profile add failed, %d\n",
2102 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2103 * @hw: pointer to the hardware structure
2104 * @blk: classification stage
2105 * @prof: pointer to flow profile
2106 * @vsi_handle: software VSI handle
2108 * Assumption: the caller has acquired the lock to the profile list
2109 * and the software VSI handle has been validated
2111 static enum ice_status
2112 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2113 struct ice_flow_prof *prof, u16 vsi_handle)
2115 enum ice_status status = ICE_SUCCESS;
2117 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2118 status = ice_rem_prof_id_flow(hw, blk,
2119 ice_get_hw_vsi_num(hw,
2123 ice_clear_bit(vsi_handle, prof->vsis);
2125 ice_debug(hw, ICE_DBG_FLOW,
2126 "HW profile remove failed, %d\n",
2134 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2135 * @hw: pointer to the HW struct
2136 * @blk: classification stage
2137 * @dir: flow direction
2138 * @prof_id: unique ID to identify this flow profile
2139 * @segs: array of one or more packet segments that describe the flow
2140 * @segs_cnt: number of packet segments provided
2141 * @acts: array of default actions
2142 * @acts_cnt: number of default actions
2143 * @prof: stores the returned flow profile added
2146 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2147 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2148 struct ice_flow_action *acts, u8 acts_cnt,
2149 struct ice_flow_prof **prof)
2151 enum ice_status status;
2153 if (segs_cnt > ICE_FLOW_SEG_MAX)
2154 return ICE_ERR_MAX_LIMIT;
2157 return ICE_ERR_PARAM;
2160 return ICE_ERR_BAD_PTR;
2162 status = ice_flow_val_hdrs(segs, segs_cnt);
2166 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2168 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2169 acts, acts_cnt, prof);
2171 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2173 ice_release_lock(&hw->fl_profs_locks[blk]);
2179 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2180 * @hw: pointer to the HW struct
2181 * @blk: the block for which the flow profile is to be removed
2182 * @prof_id: unique ID of the flow profile to be removed
2185 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2187 struct ice_flow_prof *prof;
2188 enum ice_status status;
2190 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2192 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2194 status = ICE_ERR_DOES_NOT_EXIST;
2198 /* prof becomes invalid after the call */
2199 status = ice_flow_rem_prof_sync(hw, blk, prof);
2202 ice_release_lock(&hw->fl_profs_locks[blk]);
2208 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2209 * @hw: pointer to the HW struct
2210 * @blk: classification stage
2211 * @prof_id: the profile ID handle
2212 * @hw_prof_id: pointer to variable to receive the HW profile ID
2215 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2218 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2219 struct ice_prof_map *map;
2221 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2222 map = ice_search_prof_id(hw, blk, prof_id);
2224 *hw_prof_id = map->prof_id;
2225 status = ICE_SUCCESS;
2227 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2232 * ice_flow_find_entry - look for a flow entry using its unique ID
2233 * @hw: pointer to the HW struct
2234 * @blk: classification stage
2235 * @entry_id: unique ID to identify this flow entry
2237 * This function looks for the flow entry with the specified unique ID in all
2238 * flow profiles of the specified classification stage. If the entry is found,
2239 * and it returns the handle to the flow entry. Otherwise, it returns
2240 * ICE_FLOW_ENTRY_ID_INVAL.
2242 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2244 struct ice_flow_entry *found = NULL;
2245 struct ice_flow_prof *p;
2247 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2249 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2250 struct ice_flow_entry *e;
2252 ice_acquire_lock(&p->entries_lock);
2253 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2254 if (e->id == entry_id) {
2258 ice_release_lock(&p->entries_lock);
2264 ice_release_lock(&hw->fl_profs_locks[blk]);
2266 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2270 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2271 * @hw: pointer to the hardware structure
2272 * @acts: array of actions to be performed on a match
2273 * @acts_cnt: number of actions
2274 * @cnt_alloc: indicates if an ACL counter has been allocated.
/* NOTE(review): extraction dropped lines in this function (loop variable
 * declaration, *cnt_alloc initialization, the 'return ICE_ERR_CFG'-style
 * branch after the type whitelist, closing braces, and final return).
 */
2276 static enum ice_status
2277 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2278 u8 acts_cnt, bool *cnt_alloc)
2280 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2283 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2286 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2287 return ICE_ERR_OUT_OF_RANGE;
/* First pass: only a whitelist of action types is accepted, and no action
 * type may appear twice.
 */
2289 for (i = 0; i < acts_cnt; i++) {
2290 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2291 acts[i].type != ICE_FLOW_ACT_DROP &&
2292 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2293 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2296 /* If the caller want to add two actions of the same type, then
2297 * it is considered invalid configuration.
/* NOTE(review): dup_check is indexed directly by acts[i].type -- assumes the
 * enum values fit within ICE_AQC_TBL_MAX_ACTION_PAIRS * 2 bits; TODO confirm.
 */
2299 if (ice_test_and_set_bit(acts[i].type, dup_check))
2300 return ICE_ERR_PARAM;
2303 /* Checks if ACL counters are needed. */
2304 for (i = 0; i < acts_cnt; i++) {
2305 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2306 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2307 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2308 struct ice_acl_cntrs cntrs;
2309 enum ice_status status;
/* NOTE(review): only 'bank' and 'type' initialization is visible; the
 * remaining cntrs fields (e.g. amount) are presumably set on elided lines.
 */
2312 cntrs.bank = 0; /* Only bank0 for the moment */
2314 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2315 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2317 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2319 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2322 /* Counter index within the bank */
2323 acts[i].data.acl_act.value =
2324 CPU_TO_LE16(cntrs.first_cntr);
2333 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2334 * @fld: number of the given field
2335 * @info: info about field
2336 * @range_buf: range checker configuration buffer
2337 * @data: pointer to a data buffer containing flow entry's match values/masks
2338 * @range: Input/output param indicating which range checkers are being used
/* NOTE(review): the declarations of new_mask/new_low/new_high and the
 * early-return for a zero mask are elided by the extraction.
 */
2341 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2342 struct ice_aqc_acl_profile_ranges *range_buf,
2343 u8 *data, u8 *range)
2347 /* If not specified, default mask is all bits in field */
/* NOTE(review): ice_flds_info[fld].size is in bits; for a 16-bit field
 * BIT(16) - 1 only works if new_mask is wider than u16 -- its declaration
 * is not visible, TODO confirm. Also, *(u16 *)(data + off) performs a
 * potentially unaligned load from the caller's byte buffer.
 */
2348 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2349 BIT(ice_flds_info[fld].size) - 1 :
2350 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2352 /* If the mask is 0, then we don't need to worry about this input
2353 * range checker value.
2357 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2359 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2360 u8 range_idx = info->entry.val;
/* Range-checker boundaries and mask are programmed big-endian. */
2362 range_buf->checker_cfg[range_idx].low_boundary =
2363 CPU_TO_BE16(new_low);
2364 range_buf->checker_cfg[range_idx].high_boundary =
2365 CPU_TO_BE16(new_high);
2366 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2368 /* Indicate which range checker is being used */
2369 *range |= BIT(range_idx);
2374 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2375 * @fld: number of the given field
2376 * @info: info about the field
2377 * @buf: buffer containing the entry
2378 * @dontcare: buffer containing don't care mask for entry
2379 * @data: pointer to a data buffer containing flow entry's match values/masks
/* NOTE(review): the declaration of 'disp' (used below) and several
 * braces/guards are elided by the extraction; per the modulo at old line
 * 2392 it is presumably a sub-byte bit displacement, u8 -- TODO confirm.
 */
2382 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2383 u8 *dontcare, u8 *data)
2385 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2386 bool use_mask = false;
2389 src = info->src.val;
2390 mask = info->src.mask;
2391 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2392 disp = info->xtrct.disp % BITS_PER_BYTE;
2394 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Byte-by-byte copy of the (bit-shifted) field value and inverted mask into
 * the key and don't-care buffers; tmp_s/tmp_m carry shifted-out bits into
 * the next destination byte.
 */
2397 for (k = 0; k < info->entry.last; k++, dst++) {
2398 /* Add overflow bits from previous byte */
2399 buf[dst] = (tmp_s & 0xff00) >> 8;
2401 /* If mask is not valid, tmp_m is always zero, so just setting
2402 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2403 * overflow bits of mask from prev byte
2405 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2407 /* If there is displacement, last byte will only contain
2408 * displaced data, but there is no more data to read from user
2409 * buffer, so skip so as not to potentially read beyond end of
2412 if (!disp || k < info->entry.last - 1) {
2413 /* Store shifted data to use in next byte */
2414 tmp_s = data[src++] << disp;
2416 /* Add current (shifted) byte */
2417 buf[dst] |= tmp_s & 0xff;
2419 /* Handle mask if valid */
/* Don't-care bits are the complement of the user mask, shifted the same way
 * as the data.
 */
2421 tmp_m = (~data[mask++] & 0xff) << disp;
2422 dontcare[dst] |= tmp_m & 0xff;
2427 /* Fill in don't care bits at beginning of field */
2429 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2430 for (k = 0; k < disp; k++)
2431 dontcare[dst] |= BIT(k);
2434 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2436 /* Fill in don't care bits at end of field */
2438 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2439 info->entry.last - 1;
2440 for (k = end_disp; k < BITS_PER_BYTE; k++)
2441 dontcare[dst] |= BIT(k);
2446 * ice_flow_acl_frmt_entry - Format ACL entry
2447 * @hw: pointer to the hardware structure
2448 * @prof: pointer to flow profile
2449 * @e: pointer to the flow entry
2450 * @data: pointer to a data buffer containing flow entry's match values/masks
2451 * @acts: array of actions to be performed on a match
2452 * @acts_cnt: number of actions
2454 * Formats the key (and key_inverse) to be matched from the data passed in,
2455 * along with data from the flow profile. This key/key_inverse pair makes up
2456 * the 'entry' for an ACL flow entry.
/* NOTE(review): this function's goto-based cleanup chain (labels, NULL
 * checks after each ice_malloc, and the final return) is elided by the
 * extraction; the trailing 'if (status && ...)' blocks are the visible
 * remnants of the error path.
 */
2458 static enum ice_status
2459 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2460 struct ice_flow_entry *e, u8 *data,
2461 struct ice_flow_action *acts, u8 acts_cnt)
2463 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2464 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2465 enum ice_status status;
2470 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2474 /* Format the result action */
2476 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2480 status = ICE_ERR_NO_MEMORY;
/* The entry keeps its own copy of the actions. */
2482 e->acts = (struct ice_flow_action *)
2483 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2484 ICE_NONDMA_TO_NONDMA);
2489 e->acts_cnt = acts_cnt;
2491 /* Format the matching data */
2492 buf_sz = prof->cfg.scen->width;
2493 buf = (u8 *)ice_malloc(hw, buf_sz);
2497 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2501 /* 'key' buffer will store both key and key_inverse, so must be twice
2504 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2508 range_buf = (struct ice_aqc_acl_profile_ranges *)
2509 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2513 /* Set don't care mask to all 1's to start, will zero out used bytes */
2514 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2516 for (i = 0; i < prof->segs_cnt; i++) {
2517 struct ice_flow_seg_info *seg = &prof->segs[i];
2518 u64 match = seg->match;
2521 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2522 struct ice_flow_fld_info *info;
2523 const u64 bit = BIT_ULL(j);
2528 info = &seg->fields[j];
/* Range-matched fields fill range_buf/range; all other fields are written
 * directly into the key/don't-care buffers.
 */
2530 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2531 ice_flow_acl_frmt_entry_range(j, info,
2535 ice_flow_acl_frmt_entry_fld(j, info, buf,
2541 for (j = 0; j < seg->raws_cnt; j++) {
2542 struct ice_flow_fld_info *info = &seg->raws[j].info;
2543 u16 dst, src, mask, k;
2544 bool use_mask = false;
2546 src = info->src.val;
2547 dst = info->entry.val -
2548 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2549 mask = info->src.mask;
2551 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2554 for (k = 0; k < info->entry.last; k++, dst++) {
2555 buf[dst] = data[src++];
2557 dontcare[dst] = ~data[mask++];
/* The profile ID byte in the key is always matched exactly. */
2564 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2565 dontcare[prof->cfg.scen->pid_idx] = 0;
2567 /* Format the buffer for direction flags */
2568 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
/* NOTE(review): only the Rx branch is visible; the Tx handling (if any) and
 * the don't-care update for the direction byte are on elided lines.
 */
2570 if (prof->dir == ICE_FLOW_RX)
2571 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2574 buf[prof->cfg.scen->rng_chk_idx] = range;
2575 /* Mark any unused range checkers as don't care */
2576 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
/* NOTE(review): range_buf ownership -- handed to the entry when range
 * checkers are used, freed otherwise; the 'if (range)' / else split around
 * these two statements is elided.
 */
2577 e->range_buf = range_buf;
2579 ice_free(hw, range_buf);
2582 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2588 e->entry_sz = buf_sz * 2;
2595 ice_free(hw, dontcare);
2600 if (status && range_buf) {
2601 ice_free(hw, range_buf);
2602 e->range_buf = NULL;
2605 if (status && e->acts) {
2606 ice_free(hw, e->acts);
2611 if (status && cnt_alloc)
2612 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2618 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2619 * the compared data.
2620 * @prof: pointer to flow profile
2621 * @e: pointer to the comparing flow entry
2622 * @do_chg_action: decide if we want to change the ACL action
2623 * @do_add_entry: decide if we want to add the new ACL entry
2624 * @do_rem_entry: decide if we want to remove the current ACL entry
2626 * Find an ACL scenario entry that matches the compared data. At the same time,
2627 * this function also figures out:
2628 * a/ If we want to change the ACL action
2629 * b/ If we want to add the new ACL entry
2630 * c/ If we want to remove the current ACL entry
2632 static struct ice_flow_entry *
2633 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2634 struct ice_flow_entry *e, bool *do_chg_action,
2635 bool *do_add_entry, bool *do_rem_entry)
2637 struct ice_flow_entry *p, *return_entry = NULL;
2641 * a/ There exists an entry with same matching data, but different
2642 * priority, then we remove this existing ACL entry. Then, we
2643 * will add the new entry to the ACL scenario.
2644 * b/ There exists an entry with same matching data, priority, and
2645 * result action, then we do nothing
2646 * c/ There exists an entry with same matching data, priority, but
2647 * different action, then only change the entry's action.
2648 * d/ Else, we add this new entry to the ACL scenario.
2650 *do_chg_action = false;
2651 *do_add_entry = true;
2652 *do_rem_entry = false;
2653 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* Entries with different matching data are skipped ('continue' elided). */
2654 if (memcmp(p->entry, e->entry, p->entry_sz))
2657 /* From this point, we have the same matching_data. */
2658 *do_add_entry = false;
2661 if (p->priority != e->priority) {
2662 /* matching data && !priority */
2663 *do_add_entry = true;
2664 *do_rem_entry = true;
2668 /* From this point, we will have matching_data && priority */
2669 if (p->acts_cnt != e->acts_cnt)
2670 *do_chg_action = true;
/* NOTE(review): as visible, found_not_match is set when ANY e->acts[j]
 * differs from p->acts[i]; the 'else break' that turns this into an
 * exists-a-matching-action test appears to be on elided lines -- confirm.
 */
2671 for (i = 0; i < p->acts_cnt; i++) {
2672 bool found_not_match = false;
2674 for (j = 0; j < e->acts_cnt; j++)
2675 if (memcmp(&p->acts[i], &e->acts[j],
2676 sizeof(struct ice_flow_action))) {
2677 found_not_match = true;
2681 if (found_not_match) {
2682 *do_chg_action = true;
2687 /* (do_chg_action = true) means :
2688 * matching_data && priority && !result_action
2689 * (do_chg_action = false) means :
2690 * matching_data && priority && result_action
2695 return return_entry;
2699 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2702 static enum ice_acl_entry_prior
2703 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2705 enum ice_acl_entry_prior acl_prior;
2708 case ICE_FLOW_PRIO_LOW:
2709 acl_prior = ICE_LOW;
2711 case ICE_FLOW_PRIO_NORMAL:
2712 acl_prior = ICE_NORMAL;
2714 case ICE_FLOW_PRIO_HIGH:
2715 acl_prior = ICE_HIGH;
2718 acl_prior = ICE_NORMAL;
2726 * ice_flow_acl_union_rng_chk - Perform union operation between two
2727 * range checker buffers
2728 * @dst_buf: pointer to destination range checker buffer
2729 * @src_buf: pointer to source range checker buffer
2731 * For this function, we do the union between dst_buf and src_buf
2732 * range checker buffer, and we will save the result back to dst_buf
/* NOTE(review): extraction dropped lines here (loop variable declarations,
 * the skip of empty source slots, break statements, the else branch pairing
 * with 'if (will_populate)', and the final success return).
 */
2734 static enum ice_status
2735 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2736 struct ice_aqc_acl_profile_ranges *src_buf)
2740 if (!dst_buf || !src_buf)
2741 return ICE_ERR_BAD_PTR;
2743 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2744 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2745 bool will_populate = false;
2747 in_data = &src_buf->checker_cfg[i];
/* Find a destination slot that is either unused (mask == 0) or already
 * holds an identical configuration (dedup).
 */
2752 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2753 cfg_data = &dst_buf->checker_cfg[j];
2755 if (!cfg_data->mask ||
2756 !memcmp(cfg_data, in_data,
2757 sizeof(struct ice_acl_rng_data))) {
2758 will_populate = true;
2763 if (will_populate) {
2764 ice_memcpy(cfg_data, in_data,
2765 sizeof(struct ice_acl_rng_data),
2766 ICE_NONDMA_TO_NONDMA);
2768 /* No available slot left to program range checker */
2769 return ICE_ERR_MAX_LIMIT;
2777 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2778 * @hw: pointer to the hardware structure
2779 * @prof: pointer to flow profile
2780 * @entry: double pointer to the flow entry
2782 * For this function, we will look at the current added entries in the
2783 * corresponding ACL scenario. Then, we will perform matching logic to
2784 * see if we want to add/modify/do nothing with this new entry.
/* NOTE(review): extraction dropped many lines here (status checks after each
 * call, goto labels for cleanup of 'acts', braces for the do_add_entry and
 * error branches, and the final return).
 */
2786 static enum ice_status
2787 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2788 struct ice_flow_entry **entry)
2790 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2791 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2792 struct ice_acl_act_entry *acts = NULL;
2793 struct ice_flow_entry *exist;
2794 enum ice_status status = ICE_SUCCESS;
2795 struct ice_flow_entry *e;
2798 if (!entry || !(*entry) || !prof)
2799 return ICE_ERR_BAD_PTR;
2803 do_chg_rng_chk = false;
/* Range checkers only need reprogramming when the new entry brings one. */
2807 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2812 /* Query the current range-checker value in FW */
2813 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2817 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2818 sizeof(struct ice_aqc_acl_profile_ranges),
2819 ICE_NONDMA_TO_NONDMA);
2821 /* Generate the new range-checker value */
2822 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2826 /* Reconfigure the range check if the buffer is changed. */
2827 do_chg_rng_chk = false;
2828 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2829 sizeof(struct ice_aqc_acl_profile_ranges))) {
2830 status = ice_prog_acl_prof_ranges(hw, prof_id,
2831 &cfg_rng_buf, NULL);
2835 do_chg_rng_chk = true;
2839 /* Figure out if we want to (change the ACL action) and/or
2840 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2842 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2843 &do_add_entry, &do_rem_entry);
2846 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2851 /* Prepare the result action buffer */
/* NOTE(review): the element count passed to ice_calloc is e->entry_sz, but
 * the copy loop below iterates e->acts_cnt elements of
 * struct ice_acl_act_entry -- the count looks like it should be e->acts_cnt
 * (confirm against upstream ice_flow.c). Also no NULL check of 'acts' is
 * visible before it is dereferenced.
 */
2852 acts = (struct ice_acl_act_entry *)ice_calloc
2853 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2854 for (i = 0; i < e->acts_cnt; i++)
2855 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2856 sizeof(struct ice_acl_act_entry),
2857 ICE_NONDMA_TO_NONDMA);
2860 enum ice_acl_entry_prior prior;
/* The entry buffer holds key followed by key_inverse (see
 * ice_flow_acl_frmt_entry), so the inverse half starts at entry_sz / 2.
 */
2864 keys = (u8 *)e->entry;
2865 inverts = keys + (e->entry_sz / 2);
2866 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2868 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2869 inverts, acts, e->acts_cnt,
2874 e->scen_entry_idx = entry_idx;
2875 LIST_ADD(&e->l_entry, &prof->entries);
2877 if (do_chg_action) {
2878 /* For the action memory info, update the SW's copy of
2879 * exist entry with e's action memory info
2881 ice_free(hw, exist->acts);
2882 exist->acts_cnt = e->acts_cnt;
2883 exist->acts = (struct ice_flow_action *)
2884 ice_calloc(hw, exist->acts_cnt,
2885 sizeof(struct ice_flow_action));
2888 status = ICE_ERR_NO_MEMORY;
2892 ice_memcpy(exist->acts, e->acts,
2893 sizeof(struct ice_flow_action) * e->acts_cnt,
2894 ICE_NONDMA_TO_NONDMA);
2896 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2898 exist->scen_entry_idx);
2903 if (do_chg_rng_chk) {
2904 /* In this case, we want to update the range checker
2905 * information of the exist entry
2907 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2913 /* As we don't add the new entry to our SW DB, deallocate its
2914 * memories, and return the exist entry to the caller
2916 ice_dealloc_flow_entry(hw, e);
2927 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2928 * @hw: pointer to the hardware structure
2929 * @prof: pointer to flow profile
2930 * @e: double pointer to the flow entry
2932 static enum ice_status
2933 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2934 struct ice_flow_entry **e)
2936 enum ice_status status;
2938 ice_acquire_lock(&prof->entries_lock);
2939 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2940 ice_release_lock(&prof->entries_lock);
2946 * ice_flow_add_entry - Add a flow entry
2947 * @hw: pointer to the HW struct
2948 * @blk: classification stage
2949 * @prof_id: ID of the profile to add a new flow entry to
2950 * @entry_id: unique ID to identify this flow entry
2951 * @vsi_handle: software VSI handle for the flow entry
2952 * @prio: priority of the flow entry
2953 * @data: pointer to a data buffer containing flow entry's match values/masks
2954 * @acts: arrays of actions to be performed on a match
2955 * @acts_cnt: number of actions
2956 * @entry_h: pointer to buffer that receives the new flow entry's handle
/* NOTE(review): extraction dropped lines here (return type line, the
 * entry_h parameter in the signature, the switch on blk around the ACL
 * branch, NULL checks, goto labels, and the final return).
 */
2959 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2960 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2961 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2964 struct ice_flow_entry *e = NULL;
2965 struct ice_flow_prof *prof;
2966 enum ice_status status = ICE_SUCCESS;
2968 /* ACL entries must indicate an action */
2969 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2970 return ICE_ERR_PARAM;
2972 /* No flow entry data is expected for RSS */
2973 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2974 return ICE_ERR_BAD_PTR;
2976 if (!ice_is_vsi_valid(hw, vsi_handle))
2977 return ICE_ERR_PARAM;
2979 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2981 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2983 status = ICE_ERR_DOES_NOT_EXIST;
2985 /* Allocate memory for the entry being added and associate
2986 * the VSI to the found flow profile
2988 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2990 status = ICE_ERR_NO_MEMORY;
2992 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2995 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Entry bookkeeping (id/prof/prio assignments are partly elided). */
3000 e->vsi_handle = vsi_handle;
3009 /* ACL will handle the entry management */
3010 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3015 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Unsupported blocks fall through to not-implemented. */
3021 status = ICE_ERR_NOT_IMPL;
3025 if (blk != ICE_BLK_ACL) {
3026 /* ACL will handle the entry management */
3027 ice_acquire_lock(&prof->entries_lock);
3028 LIST_ADD(&e->l_entry, &prof->entries);
3029 ice_release_lock(&prof->entries_lock);
3032 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* Error path: free the entry's key buffer (rest of cleanup elided). */
3037 ice_free(hw, e->entry);
3045 * ice_flow_rem_entry - Remove a flow entry
3046 * @hw: pointer to the HW struct
3047 * @blk: classification stage
3048 * @entry_h: handle to the flow entry to be removed
3050 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3053 struct ice_flow_entry *entry;
3054 struct ice_flow_prof *prof;
3055 enum ice_status status = ICE_SUCCESS;
3057 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3058 return ICE_ERR_PARAM;
3060 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3062 /* Retain the pointer to the flow profile as the entry will be freed */
3066 ice_acquire_lock(&prof->entries_lock);
3067 status = ice_flow_rem_entry_sync(hw, blk, entry);
3068 ice_release_lock(&prof->entries_lock);
3075 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3076 * @seg: packet segment the field being set belongs to
3077 * @fld: field to be set
3078 * @field_type: type of the field
3079 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3080 * entry's input buffer
3081 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3083 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3084 * entry's input buffer
3086 * This helper function stores information of a field being matched, including
3087 * the type of the field and the locations of the value to match, the mask, and
3088 * and the upper-bound value in the start of the input buffer for a flow entry.
3089 * This function should only be used for fixed-size data structures.
3091 * This function also opportunistically determines the protocol headers to be
3092 * present based on the fields being set. Some fields cannot be used alone to
3093 * determine the protocol headers present. Sometimes, fields for particular
3094 * protocol headers are not matched. In those cases, the protocol headers
3095 * must be explicitly set.
3098 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3099 enum ice_flow_fld_match_type field_type, u16 val_loc,
3100 u16 mask_loc, u16 last_loc)
3102 u64 bit = BIT_ULL(fld);
3105 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3108 seg->fields[fld].type = field_type;
3109 seg->fields[fld].src.val = val_loc;
3110 seg->fields[fld].src.mask = mask_loc;
3111 seg->fields[fld].src.last = last_loc;
3113 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3117 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3118 * @seg: packet segment the field being set belongs to
3119 * @fld: field to be set
3120 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3121 * entry's input buffer
3122 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3124 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3125 * entry's input buffer
3126 * @range: indicate if field being matched is to be in a range
3128 * This function specifies the locations, in the form of byte offsets from the
3129 * start of the input buffer for a flow entry, from where the value to match,
3130 * the mask value, and upper value can be extracted. These locations are then
3131 * stored in the flow profile. When adding a flow entry associated with the
3132 * flow profile, these locations will be used to quickly extract the values and
3133 * create the content of a match entry. This function should only be used for
3134 * fixed-size data structures.
3137 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3138 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3140 enum ice_flow_fld_match_type t = range ?
3141 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3143 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
/**
 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @pref_loc: location of prefix value from entry's input buffer
 * @pref_sz: size of the location holding the prefix value
 *
 * This function specifies the locations, in the form of byte offsets from the
 * start of the input buffer for a flow entry, from where the value to match
 * and the IPv4 prefix value can be extracted. These locations are then stored
 * in the flow profile. When adding flow entries to the associated flow profile,
 * these locations can be used to quickly extract the values to create the
 * content of a match entry. This function should only be used for fixed-size
 * data structures.
 */
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
			u16 val_loc, u16 pref_loc, u8 pref_sz)
{
	/* For this type of field, the "mask" location is for the prefix value's
	 * location and the "last" location is for the size of the location of
	 * the prefix value.
	 */
	ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
			     pref_loc, (u16)pref_sz);
}
/**
 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
 * @seg: packet segment the field being set belongs to
 * @off: offset of the raw field from the beginning of the segment in bytes
 * @len: length of the raw pattern to be matched
 * @val_loc: location of the value to match from entry's input buffer
 * @mask_loc: location of mask value from entry's input buffer
 *
 * This function specifies the offset of the raw field to be match from the
 * beginning of the specified packet segment, and the locations, in the form of
 * byte offsets from the start of the input buffer for a flow entry, from where
 * the value to match and the mask value to be extracted. These locations are
 * then stored in the flow profile. When adding flow entries to the associated
 * flow profile, these locations can be used to quickly extract the values to
 * create the content of a match entry. This function should only be used for
 * fixed-size data structures.
 */
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc)
{
	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
		seg->raws[seg->raws_cnt].off = off;
		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
		seg->raws[seg->raws_cnt].info.src.val = val_loc;
		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
		/* The "last" field is used to store the length of the field */
		seg->raws[seg->raws_cnt].info.src.last = len;
	}

	/* Overflows of "raws" will be handled as an error condition later in
	 * the flow when this information is processed.
	 */
	seg->raws_cnt++;
}
/* Protocol header bits that may appear at each layer of an RSS segment */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header bits an RSS packet segment may legitimately carry */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
/**
 * ice_flow_set_rss_seg_info - setup packet segments for RSS
 * @segs: pointer to the flow field segment(s)
 * @hash_fields: fields to be hashed on for the segment(s)
 * @flow_hdr: protocol header fields within a packet segment
 *
 * Helper function to extract fields from hash bitmap and use flow
 * header value to set flow field segment for further use in flow
 * profile entry or removal.
 */
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
			  u32 flow_hdr)
{
	u64 val = hash_fields;
	u8 i;

	/* Register every field whose bit is set in the hash bitmap; the
	 * ICE_FLOW_FLD_OFF_INVAL locations mean "hash only, no match data".
	 */
	for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
		u64 bit = BIT_ULL(i);

		if (val & bit) {
			ice_flow_set_fld(segs, (enum ice_flow_field)i,
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL, false);
			val &= ~bit;
		}
	}
	ICE_FLOW_SET_HDRS(segs, flow_hdr);

	/* Reject header bits outside the supported L2/L3/L4 RSS set */
	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
	    ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
		return ICE_ERR_PARAM;

	/* At most one L3 protocol may be selected */
	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
	if (val && !ice_is_pow2(val))
		return ICE_ERR_PARAM;

	/* At most one L4 protocol may be selected */
	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
	if (val && !ice_is_pow2(val))
		return ICE_ERR_PARAM;

	return ICE_SUCCESS;
}
/**
 * ice_rem_vsi_rss_list - remove VSI from RSS list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Remove the VSI from all RSS configurations in the list.
 */
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_rss_cfg *r, *tmp;

	if (LIST_EMPTY(&hw->rss_list_head))
		return;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry)
		/* Drop the VSI's bit; free the entry once no VSI uses it */
		if (ice_test_and_clear_bit(vsi_handle, r->vsis))
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
				ice_free(hw, r);
			}
	ice_release_lock(&hw->rss_locks);
}
/**
 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * This function will iterate through all flow profiles and disassociate
 * the VSI from that profile. If the flow profile has no VSIs it will
 * be removed.
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (LIST_EMPTY(&hw->fl_profs[blk]))
		return ICE_SUCCESS;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
				 l_entry)
		if (ice_is_bit_set(p->vsis, vsi_handle)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			if (status)
				break;

			/* Delete the profile once its last VSI is gone */
			if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
				if (status)
					break;
			}
		}
	ice_release_lock(&hw->rss_locks);

	return status;
}
/**
 * ice_rem_rss_list - remove RSS configuration from list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *tmp;

	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found
	 * remove from the RSS entry list of the VSI context and delete entry.
	 */
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			ice_clear_bit(vsi_handle, r->vsis);
			/* Free the entry once no VSI references it */
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
				ice_free(hw, r);
			}
			return;
		}
}
/**
 * ice_add_rss_list - add RSS configuration to list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *rss_cfg;

	/* Reuse an existing entry with identical hash fields and headers */
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			ice_set_bit(vsi_handle, r->vsis);
			return ICE_SUCCESS;
		}

	/* No match found - create a new list entry for this configuration */
	rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
	if (!rss_cfg)
		return ICE_ERR_NO_MEMORY;

	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
	rss_cfg->symm = prof->cfg.symm;
	ice_set_bit(vsi_handle, rss_cfg->vsis);

	LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);

	return ICE_SUCCESS;
}
/* Field positions/masks within the 64-bit RSS flow profile ID */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
/* NOTE(review): this mask covers 30 bits (bits 32..61) while the format
 * comment below claims [32:62] - confirm which is intended.
 */
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts for non-tunneled (outer-only) vs tunneled RSS configs */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3422 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3424 u32 s = ((src % 4) << 3); /* byte shift */
3425 u32 v = dst | 0x80; /* value to program */
3426 u8 i = src / 4; /* register index */
3429 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3430 reg = (reg & ~(0xff << s)) | (v << s);
3431 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3435 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3438 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3441 for (i = 0; i < len; i++) {
3442 ice_rss_config_xor_word(hw, prof_id,
3443 /* Yes, field vector in GLQF_HSYMM and
3444 * GLQF_HINSET is inversed!
3446 fv_last_word - (src + i),
3447 fv_last_word - (dst + i));
3448 ice_rss_config_xor_word(hw, prof_id,
3449 fv_last_word - (dst + i),
3450 fv_last_word - (src + i));
/**
 * ice_rss_update_symm - apply a profile's symmetric-hash setting to hardware
 * @hw: pointer to the hardware structure
 * @prof: flow profile whose symmetric configuration is to be programmed
 *
 * Resolves the software profile ID to the hardware profile ID, resets the
 * profile's GLQF_HSYMM registers to their default (asymmetric) state, and,
 * when symmetric hashing is enabled, cross-programs the source/destination
 * address and port words of the profile's last segment so both directions
 * of a flow produce the same hash.
 */
static void
ice_rss_update_symm(struct ice_hw *hw,
		    struct ice_flow_prof *prof)
{
	struct ice_prof_map *map;
	u8 prof_id, m;

	ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
	if (map)
		prof_id = map->prof_id;
	ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	if (!map)
		return;
	/* clear to default */
	for (m = 0; m < 6; m++)
		wr32(hw, GLQF_HSYMM(prof_id, m), 0);
	if (prof->cfg.symm) {
		struct ice_flow_seg_info *seg =
			&prof->segs[prof->segs_cnt - 1];

		struct ice_flow_seg_xtrct *ipv4_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv4_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;

		struct ice_flow_seg_xtrct *tcp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *tcp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *udp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *udp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *sctp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *sctp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;

		/* xor IPv4: SA/DA are 2 words each */
		if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv4_src->idx, ipv4_dst->idx, 2);

		/* xor IPv6: SA/DA are 8 words each */
		if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv6_src->idx, ipv6_dst->idx, 8);

		/* xor TCP source/destination port */
		if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   tcp_src->idx, tcp_dst->idx, 1);

		/* xor UDP source/destination port */
		if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   udp_src->idx, udp_dst->idx, 1);

		/* xor SCTP source/destination port */
		if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   sctp_src->idx, sctp_dst->idx, 1);
	}
}
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 * @symm: symmetric hash enable/disable
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt, bool symm)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		if (prof->cfg.symm == symm)
			goto exit;
		/* Same profile but new symmetric setting: reprogram only
		 * the symmetric-hash registers.
		 */
		prof->cfg.symm = symm;
		goto update_symm;
	}

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		if (prof->cfg.symm == symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
						     vsi_handle);
			if (!status)
				status = ice_add_rss_list(hw, vsi_handle,
							  prof);
		} else {
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;
		}
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

	prof->cfg.symm = symm;
update_symm:
	ice_rss_update_symm(hw, prof);

exit:
	ice_free(hw, segs);
	return status;
}
3652 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3653 * @hw: pointer to the hardware structure
3654 * @vsi_handle: software VSI handle
3655 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3656 * @addl_hdrs: protocol header fields
3657 * @symm: symmetric hash enable/disable
3659 * This function will generate a flow profile based on fields associated with
3660 * the input fields to hash on, the flow type and use the VSI number to add
3661 * a flow entry to the profile.
3664 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3665 u32 addl_hdrs, bool symm)
3667 enum ice_status status;
3669 if (hashed_flds == ICE_HASH_INVALID ||
3670 !ice_is_vsi_valid(hw, vsi_handle))
3671 return ICE_ERR_PARAM;
3673 ice_acquire_lock(&hw->rss_locks);
3674 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3675 ICE_RSS_OUTER_HEADERS, symm);
3677 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3678 addl_hdrs, ICE_RSS_INNER_HEADERS,
3680 ice_release_lock(&hw->rss_locks);
/**
 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
 * @addl_hdrs: Protocol header fields within a packet segment
 * @segs_cnt: packet segment count
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_seg_info *segs;
	struct ice_flow_prof *prof;
	enum ice_status status;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto out;

	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
	if (status)
		goto out;

	/* Remove RSS configuration from VSI context before deleting
	 * the flow profile.
	 */
	ice_rem_rss_list(hw, vsi_handle, prof);

	/* Delete the profile once its last VSI has been removed */
	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
		status = ice_flow_rem_prof(hw, blk, prof->id);

out:
	ice_free(hw, segs);
	return status;
}
3741 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3742 * @hw: pointer to the hardware structure
3743 * @vsi_handle: software VSI handle
3744 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3745 * @addl_hdrs: Protocol header fields within a packet segment
3747 * This function will lookup the flow profile based on the input
3748 * hash field bitmap, iterate through the profile entry list of
3749 * that profile and find entry associated with input VSI to be
3750 * removed. Calls are made to underlying flow apis which will in
3751 * turn build or update buffers for RSS XLT1 section.
3754 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3757 enum ice_status status;
3759 if (hashed_flds == ICE_HASH_INVALID ||
3760 !ice_is_vsi_valid(hw, vsi_handle))
3761 return ICE_ERR_PARAM;
3763 ice_acquire_lock(&hw->rss_locks);
3764 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3765 ICE_RSS_OUTER_HEADERS);
3767 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3768 addl_hdrs, ICE_RSS_INNER_HEADERS);
3769 ice_release_lock(&hw->rss_locks);
/**
 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Re-applies every stored RSS configuration that references the given VSI,
 * for both outer and inner headers. Stops at the first failure and returns
 * that status.
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry) {
		if (ice_is_bit_set(r->vsis, vsi_handle)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_OUTER_HEADERS,
						      r->symm);
			if (status)
				break;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_INNER_HEADERS,
						      r->symm);
			if (status)
				break;
		}
	}
	ice_release_lock(&hw->rss_locks);

	return status;
}
3813 * ice_get_rss_cfg - returns hashed fields for the given header types
3814 * @hw: pointer to the hardware structure
3815 * @vsi_handle: software VSI handle
3816 * @hdrs: protocol header type
3818 * This function will return the match fields of the first instance of flow
3819 * profile having the given header types and containing input VSI
3821 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3823 u64 rss_hash = ICE_HASH_INVALID;
3824 struct ice_rss_cfg *r;
3826 /* verify if the protocol header is non zero and VSI is valid */
3827 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3828 return ICE_HASH_INVALID;
3830 ice_acquire_lock(&hw->rss_locks);
3831 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3832 ice_rss_cfg, l_entry)
3833 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3834 r->packet_hdr == hdrs) {
3835 rss_hash = r->hashed_flds;
3838 ice_release_lock(&hw->rss_locks);