1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_common.h"
8 /* Size of known protocol header fields */
/* NOTE(review): each value below is a field width in BYTES; the
 * ICE_FLOW_FLD_INFO() macros further down convert these byte widths
 * into bit units via BITS_PER_BYTE.
 */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2
10 #define ICE_FLOW_FLD_SZ_VLAN 2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP 1
17 #define ICE_FLOW_FLD_SZ_IP_TTL 1
18 #define ICE_FLOW_FLD_SZ_IP_PROT 1
19 #define ICE_FLOW_FLD_SZ_PORT 2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER 2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID 4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI 2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI 4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36 enum ice_flow_seg_hdr hdr; /* protocol header this field belongs to */
37 s16 off; /* Offset from start of a protocol header, in bits */
38 u16 size; /* Size of fields in bits */
39 u16 mask; /* 16-bit mask for field */
/* Build an ice_flow_field_info initializer. _offset_bytes and
 * _size_bytes are byte counts; they are stored in bit units here.
 */
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
44 .off = (_offset_bytes) * BITS_PER_BYTE, \
45 .size = (_size_bytes) * BITS_PER_BYTE, \
/* Same as ICE_FLOW_FLD_INFO() but additionally records an explicit
 * 16-bit match mask (_mask) for the field.
 */
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
51 .off = (_offset_bytes) * BITS_PER_BYTE, \
52 .size = (_size_bytes) * BITS_PER_BYTE, \
56 /* Table containing properties of supported protocol header fields */
/* NOTE(review): this array is sized by ICE_FLOW_FIELD_IDX_MAX and is
 * indexed by enum ice_flow_field_idx; the comment above each entry
 * names the index it must line up with, so entry order here must stay
 * in sync with that enum.
 */
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
60 /* ICE_FLOW_FIELD_IDX_ETH_DA */
61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62 /* ICE_FLOW_FIELD_IDX_ETH_SA */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64 /* ICE_FLOW_FIELD_IDX_S_VLAN */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66 /* ICE_FLOW_FIELD_IDX_C_VLAN */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
71 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
74 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
77 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
116 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
131 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139 /* ICE_FLOW_FIELD_IDX_ARP_OP */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
142 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
147 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
150 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152 ICE_FLOW_FLD_SZ_GTP_TEID),
153 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155 ICE_FLOW_FLD_SZ_GTP_TEID),
156 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158 ICE_FLOW_FLD_SZ_GTP_TEID),
159 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164 ICE_FLOW_FLD_SZ_GTP_TEID),
165 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167 ICE_FLOW_FLD_SZ_GTP_TEID),
169 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
173 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175 ICE_FLOW_FLD_SZ_PFCP_SEID),
177 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
181 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183 ICE_FLOW_FLD_SZ_ESP_SPI),
185 /* ICE_FLOW_FIELD_IDX_AH_SPI */
186 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187 ICE_FLOW_FLD_SZ_AH_SPI),
189 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194 /* Bitmaps indicating relevant packet types for a particular protocol header
 * NOTE(review): each table below is cast to ice_bitmap_t and ANDed
 * into params->ptypes (see ice_flow_proc_seg_hdrs); bit N set means
 * packet type (ptype) N can carry the named header.
196 * Packet types for packets with an Outer/First/Single MAC header
198 static const u32 ice_ptypes_mac_ofos[] = {
199 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
203 0x00000000, 0x00000000, 0x00000000, 0x00000000,
204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213 0x00000000, 0x00000000, 0x00000000, 0x00000000,
214 0x00000000, 0x00000000, 0x00000000, 0x00000000,
215 0x00000000, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
222 * include IPV4 other PTYPEs
224 static const u32 ice_ptypes_ipv4_ofos[] = {
225 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
226 0x00000000, 0x00000155, 0x00000000, 0x00000000,
227 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
238 static const u32 ice_ptypes_ipv4_ofos_all[] = {
239 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
240 0x00000000, 0x00000155, 0x00000000, 0x00000000,
241 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 /* Packet types for packets with an Innermost/Last IPv4 header */
250 static const u32 ice_ptypes_ipv4_il[] = {
251 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
252 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
261 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
262 * include IPv6 other PTYPEs
264 static const u32 ice_ptypes_ipv6_ofos[] = {
265 0x00000000, 0x00000000, 0x77000000, 0x10002000,
266 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
267 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
278 static const u32 ice_ptypes_ipv6_ofos_all[] = {
279 0x00000000, 0x00000000, 0x77000000, 0x10002000,
280 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
281 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 0x00000000, 0x00000000, 0x00000000, 0x00000000,
289 /* Packet types for packets with an Innermost/Last IPv6 header */
290 static const u32 ice_ptypes_ipv6_il[] = {
291 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
292 0x00000770, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 0x00000000, 0x00000000, 0x00000000, 0x00000000,
297 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* "no L4" variants below are selected when a segment specifies an IP
 * header but none of TCP/UDP/SCTP (see ice_flow_proc_seg_hdrs).
 */
301 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
302 static const u32 ice_ipv4_ofos_no_l4[] = {
303 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x000cc000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
308 0x00000000, 0x00000000, 0x00000000, 0x00000000,
309 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
314 static const u32 ice_ipv4_il_no_l4[] = {
315 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
316 0x00000008, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00139800, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 0x00000000, 0x00000000, 0x00000000, 0x00000000,
321 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
326 static const u32 ice_ipv6_ofos_no_l4[] = {
327 0x00000000, 0x00000000, 0x43000000, 0x10002000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x02300000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
338 static const u32 ice_ipv6_il_no_l4[] = {
339 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
340 0x00000430, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 /* Packet types for packets with an Outermost/First ARP header */
350 static const u32 ice_ptypes_arp_of[] = {
351 0x00000800, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 /* UDP Packet types for non-tunneled packets or tunneled
362 * packets with inner UDP.
 * NOTE(review): the per-L4/ICMP/GRE/inner-MAC tables below follow the
 * same ptype-bitmap convention as the tables above.
364 static const u32 ice_ptypes_udp_il[] = {
365 0x81000000, 0x20204040, 0x04000010, 0x80810102,
366 0x00000040, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00410000, 0x90842000, 0x00000007,
368 0x00000000, 0x00000000, 0x00000000, 0x00000000,
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000000, 0x00000000,
372 0x00000000, 0x00000000, 0x00000000, 0x00000000,
375 /* Packet types for packets with an Innermost/Last TCP header */
376 static const u32 ice_ptypes_tcp_il[] = {
377 0x04000000, 0x80810102, 0x10000040, 0x02040408,
378 0x00000102, 0x00000000, 0x00000000, 0x00000000,
379 0x00000000, 0x00820000, 0x21084000, 0x00000000,
380 0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 0x00000000, 0x00000000, 0x00000000, 0x00000000,
387 /* Packet types for packets with an Innermost/Last SCTP header */
388 static const u32 ice_ptypes_sctp_il[] = {
389 0x08000000, 0x01020204, 0x20000081, 0x04080810,
390 0x00000204, 0x00000000, 0x00000000, 0x00000000,
391 0x00000000, 0x01040000, 0x00000000, 0x00000000,
392 0x00000000, 0x00000000, 0x00000000, 0x00000000,
393 0x00000000, 0x00000000, 0x00000000, 0x00000000,
394 0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
399 /* Packet types for packets with an Outermost/First ICMP header */
400 static const u32 ice_ptypes_icmp_of[] = {
401 0x10000000, 0x00000000, 0x00000000, 0x00000000,
402 0x00000000, 0x00000000, 0x00000000, 0x00000000,
403 0x00000000, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x00000000, 0x00000000, 0x00000000,
405 0x00000000, 0x00000000, 0x00000000, 0x00000000,
406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 0x00000000, 0x00000000, 0x00000000, 0x00000000,
411 /* Packet types for packets with an Innermost/Last ICMP header */
412 static const u32 ice_ptypes_icmp_il[] = {
413 0x00000000, 0x02040408, 0x40000102, 0x08101020,
414 0x00000408, 0x00000000, 0x00000000, 0x00000000,
415 0x00000000, 0x00000000, 0x42108000, 0x00000000,
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 0x00000000, 0x00000000, 0x00000000, 0x00000000,
418 0x00000000, 0x00000000, 0x00000000, 0x00000000,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
423 /* Packet types for packets with an Outermost/First GRE header */
424 static const u32 ice_ptypes_gre_of[] = {
425 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
426 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
427 0x00000000, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
430 0x00000000, 0x00000000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
435 /* Packet types for packets with an Innermost/Last MAC header */
436 static const u32 ice_ptypes_mac_il[] = {
437 0x00000000, 0x00000000, 0x00000000, 0x00000000,
438 0x00000000, 0x00000000, 0x00000000, 0x00000000,
439 0x00000000, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 /* Packet types for GTPC */
/* NOTE(review): the _tid variant below appears to restrict the GTP-C
 * set to ptypes that carry a TEID — confirm against the ptype list.
 */
448 static const u32 ice_ptypes_gtpc[] = {
449 0x00000000, 0x00000000, 0x00000000, 0x00000000,
450 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 0x00000000, 0x00000000, 0x00000180, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
459 /* Packet types for GTPC with TEID */
460 static const u32 ice_ptypes_gtpc_tid[] = {
461 0x00000000, 0x00000000, 0x00000000, 0x00000000,
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000060, 0x00000000,
464 0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
471 /* Packet types for GTPU */
/* The three tables below pair each GTPU ptype with an attribute flag
 * (PDU extension header, downlink, or uplink respectively).
 */
472 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
473 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
474 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
475 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
476 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
477 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
478 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
479 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
480 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
481 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
482 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
483 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
484 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
485 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
486 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
487 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
488 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
489 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
490 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
491 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
492 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
495 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
496 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
497 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
498 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
499 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
500 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
501 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
502 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
503 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
504 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
505 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
506 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
507 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
508 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
509 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
510 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
511 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
512 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
513 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
514 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
515 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
518 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
519 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
520 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
521 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
522 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
523 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
524 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
525 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
526 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
527 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
528 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
529 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
530 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
531 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
532 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
533 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
534 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
535 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
536 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
537 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
538 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
/* Packet types for GTPU */
541 static const u32 ice_ptypes_gtpu[] = {
542 0x00000000, 0x00000000, 0x00000000, 0x00000000,
543 0x00000000, 0x00000000, 0x00000000, 0x00000000,
544 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
545 0x00000000, 0x00000000, 0x00000000, 0x00000000,
546 0x00000000, 0x00000000, 0x00000000, 0x00000000,
547 0x00000000, 0x00000000, 0x00000000, 0x00000000,
548 0x00000000, 0x00000000, 0x00000000, 0x00000000,
549 0x00000000, 0x00000000, 0x00000000, 0x00000000,
552 /* Packet types for pppoe */
553 static const u32 ice_ptypes_pppoe[] = {
554 0x00000000, 0x00000000, 0x00000000, 0x00000000,
555 0x00000000, 0x00000000, 0x00000000, 0x00000000,
556 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
557 0x00000000, 0x00000000, 0x00000000, 0x00000000,
558 0x00000000, 0x00000000, 0x00000000, 0x00000000,
559 0x00000000, 0x00000000, 0x00000000, 0x00000000,
560 0x00000000, 0x00000000, 0x00000000, 0x00000000,
561 0x00000000, 0x00000000, 0x00000000, 0x00000000,
564 /* Packet types for packets with PFCP NODE header */
565 static const u32 ice_ptypes_pfcp_node[] = {
566 0x00000000, 0x00000000, 0x00000000, 0x00000000,
567 0x00000000, 0x00000000, 0x00000000, 0x00000000,
568 0x00000000, 0x00000000, 0x80000000, 0x00000002,
569 0x00000000, 0x00000000, 0x00000000, 0x00000000,
570 0x00000000, 0x00000000, 0x00000000, 0x00000000,
571 0x00000000, 0x00000000, 0x00000000, 0x00000000,
572 0x00000000, 0x00000000, 0x00000000, 0x00000000,
573 0x00000000, 0x00000000, 0x00000000, 0x00000000,
576 /* Packet types for packets with PFCP SESSION header */
577 static const u32 ice_ptypes_pfcp_session[] = {
578 0x00000000, 0x00000000, 0x00000000, 0x00000000,
579 0x00000000, 0x00000000, 0x00000000, 0x00000000,
580 0x00000000, 0x00000000, 0x00000000, 0x00000005,
581 0x00000000, 0x00000000, 0x00000000, 0x00000000,
582 0x00000000, 0x00000000, 0x00000000, 0x00000000,
583 0x00000000, 0x00000000, 0x00000000, 0x00000000,
584 0x00000000, 0x00000000, 0x00000000, 0x00000000,
585 0x00000000, 0x00000000, 0x00000000, 0x00000000,
588 /* Packet types for l2tpv3 */
589 static const u32 ice_ptypes_l2tpv3[] = {
590 0x00000000, 0x00000000, 0x00000000, 0x00000000,
591 0x00000000, 0x00000000, 0x00000000, 0x00000000,
592 0x00000000, 0x00000000, 0x00000000, 0x00000300,
593 0x00000000, 0x00000000, 0x00000000, 0x00000000,
594 0x00000000, 0x00000000, 0x00000000, 0x00000000,
595 0x00000000, 0x00000000, 0x00000000, 0x00000000,
596 0x00000000, 0x00000000, 0x00000000, 0x00000000,
597 0x00000000, 0x00000000, 0x00000000, 0x00000000,
600 /* Packet types for esp */
601 static const u32 ice_ptypes_esp[] = {
602 0x00000000, 0x00000000, 0x00000000, 0x00000000,
603 0x00000000, 0x00000003, 0x00000000, 0x00000000,
604 0x00000000, 0x00000000, 0x00000000, 0x00000000,
605 0x00000000, 0x00000000, 0x00000000, 0x00000000,
606 0x00000000, 0x00000000, 0x00000000, 0x00000000,
607 0x00000000, 0x00000000, 0x00000000, 0x00000000,
608 0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 0x00000000, 0x00000000, 0x00000000, 0x00000000,
612 /* Packet types for ah */
613 static const u32 ice_ptypes_ah[] = {
614 0x00000000, 0x00000000, 0x00000000, 0x00000000,
615 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
616 0x00000000, 0x00000000, 0x00000000, 0x00000000,
617 0x00000000, 0x00000000, 0x00000000, 0x00000000,
618 0x00000000, 0x00000000, 0x00000000, 0x00000000,
619 0x00000000, 0x00000000, 0x00000000, 0x00000000,
620 0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 0x00000000, 0x00000000, 0x00000000, 0x00000000,
624 /* Packet types for packets with NAT_T ESP header */
625 static const u32 ice_ptypes_nat_t_esp[] = {
626 0x00000000, 0x00000000, 0x00000000, 0x00000000,
627 0x00000000, 0x00000030, 0x00000000, 0x00000000,
628 0x00000000, 0x00000000, 0x00000000, 0x00000000,
629 0x00000000, 0x00000000, 0x00000000, 0x00000000,
630 0x00000000, 0x00000000, 0x00000000, 0x00000000,
631 0x00000000, 0x00000000, 0x00000000, 0x00000000,
632 0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for MAC frames that carry no IP payload (outer/first) */
636 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
637 0x00000846, 0x00000000, 0x00000000, 0x00000000,
638 0x00000000, 0x00000000, 0x00000000, 0x00000000,
639 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
640 0x00000000, 0x00000000, 0x00000000, 0x00000000,
641 0x00000000, 0x00000000, 0x00000000, 0x00000000,
642 0x00000000, 0x00000000, 0x00000000, 0x00000000,
643 0x00000000, 0x00000000, 0x00000000, 0x00000000,
644 0x00000000, 0x00000000, 0x00000000, 0x00000000,
647 /* Manage parameters and info. used during the creation of a flow profile */
648 struct ice_flow_prof_params {
650 u16 entry_length; /* # of bytes formatted entry will require */
652 struct ice_flow_prof *prof; /* profile whose segments are processed */
654 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
655 * This will give us the direction flags.
657 struct ice_fv_word es[ICE_MAX_FV_WORDS];
658 /* attributes can be used to add attributes to a particular PTYPE */
659 const struct ice_ptype_attributes *attr;
662 u16 mask[ICE_MAX_FV_WORDS]; /* NOTE(review): presumably a per-FV-word match mask — confirm */
663 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); /* relevant ptypes bitmap */
/* Groupings of segment-header bits used when validating and parsing
 * flow segment descriptors (see ice_flow_val_hdrs).
 */
666 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
667 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
668 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
669 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
670 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
671 ICE_FLOW_SEG_HDR_NAT_T_ESP)
673 #define ICE_FLOW_SEG_HDRS_L2_MASK \
674 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
675 #define ICE_FLOW_SEG_HDRS_L3_MASK \
676 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
677 ICE_FLOW_SEG_HDR_ARP)
678 #define ICE_FLOW_SEG_HDRS_L4_MASK \
679 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
680 ICE_FLOW_SEG_HDR_SCTP)
681 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
682 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
683 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
686 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
687 * @segs: array of one or more packet segments that describe the flow
688 * @segs_cnt: number of packet segments provided
 *
 * Returns ICE_ERR_PARAM if any segment sets more than one L3 header bit
 * or more than one L4 header bit (the masked value must be a power of
 * two, i.e. a single header per layer).
690 static enum ice_status
691 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
695 for (i = 0; i < segs_cnt; i++) {
696 /* Multiple L3 headers */
697 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
698 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
699 return ICE_ERR_PARAM;
701 /* Multiple L4 headers */
702 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
703 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
704 return ICE_ERR_PARAM;
710 /* Sizes of fixed known protocol headers without header options */
/* NOTE(review): sizes are in bytes and assume the minimal header form
 * (e.g. IPv4 with no options, TCP with no options).
 */
711 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
712 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
713 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
714 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
715 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
716 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
717 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
718 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
719 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
722 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
723 * @params: information about the flow to be processed
724 * @seg: index of packet segment whose header size is to be determined
 *
 * Sums the fixed header sizes present in segment @seg: L2 (MAC,
 * optionally +VLAN), then at most one L3 header (IPv4/IPv6/ARP), then
 * at most one L4 header (ICMP/TCP/UDP/SCTP).
726 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
731 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
732 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
735 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
736 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
737 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
738 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
739 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
740 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
741 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
742 /* A L3 header is required if L4 is specified */
746 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
747 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
748 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
749 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
750 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
751 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
752 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
753 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
759 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
760 * @params: information about the flow to be processed
762 * This function identifies the packet types associated with the protocol
763 * headers being present in packet segments of the specified flow profile.
765 static enum ice_status
766 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
768 struct ice_flow_prof *prof;
771 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
776 for (i = 0; i < params->prof->segs_cnt; i++) {
777 const ice_bitmap_t *src;
780 hdrs = prof->segs[i].hdrs;
782 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
783 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
784 (const ice_bitmap_t *)ice_ptypes_mac_il;
785 ice_and_bitmap(params->ptypes, params->ptypes, src,
789 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
790 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
791 ice_and_bitmap(params->ptypes, params->ptypes, src,
795 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
796 ice_and_bitmap(params->ptypes, params->ptypes,
797 (const ice_bitmap_t *)ice_ptypes_arp_of,
801 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
802 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
803 ice_and_bitmap(params->ptypes, params->ptypes, src,
806 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
807 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
809 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all :
810 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
811 ice_and_bitmap(params->ptypes, params->ptypes, src,
813 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
814 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
816 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all :
817 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
818 ice_and_bitmap(params->ptypes, params->ptypes, src,
820 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
821 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
822 src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
823 (const ice_bitmap_t *)ice_ipv4_il_no_l4;
824 ice_and_bitmap(params->ptypes, params->ptypes, src,
826 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
827 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
828 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
829 ice_and_bitmap(params->ptypes, params->ptypes, src,
831 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
832 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
833 src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
834 (const ice_bitmap_t *)ice_ipv6_il_no_l4;
835 ice_and_bitmap(params->ptypes, params->ptypes, src,
837 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
838 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
839 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
840 ice_and_bitmap(params->ptypes, params->ptypes, src,
844 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
845 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
846 ice_and_bitmap(params->ptypes, params->ptypes,
847 src, ICE_FLOW_PTYPE_MAX);
848 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
849 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
850 ice_and_bitmap(params->ptypes, params->ptypes, src,
853 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
854 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
858 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
859 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
860 ice_and_bitmap(params->ptypes, params->ptypes, src,
862 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
863 ice_and_bitmap(params->ptypes, params->ptypes,
864 (const ice_bitmap_t *)ice_ptypes_tcp_il,
866 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
867 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
868 ice_and_bitmap(params->ptypes, params->ptypes, src,
872 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
873 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
874 (const ice_bitmap_t *)ice_ptypes_icmp_il;
875 ice_and_bitmap(params->ptypes, params->ptypes, src,
877 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
879 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
880 ice_and_bitmap(params->ptypes, params->ptypes,
881 src, ICE_FLOW_PTYPE_MAX);
883 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
884 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
885 ice_and_bitmap(params->ptypes, params->ptypes,
886 src, ICE_FLOW_PTYPE_MAX);
887 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
888 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
889 ice_and_bitmap(params->ptypes, params->ptypes,
890 src, ICE_FLOW_PTYPE_MAX);
891 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
892 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
893 ice_and_bitmap(params->ptypes, params->ptypes,
894 src, ICE_FLOW_PTYPE_MAX);
896 /* Attributes for GTP packet with downlink */
897 params->attr = ice_attr_gtpu_down;
898 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
899 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
900 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
901 ice_and_bitmap(params->ptypes, params->ptypes,
902 src, ICE_FLOW_PTYPE_MAX);
904 /* Attributes for GTP packet with uplink */
905 params->attr = ice_attr_gtpu_up;
906 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
907 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
908 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
909 ice_and_bitmap(params->ptypes, params->ptypes,
910 src, ICE_FLOW_PTYPE_MAX);
912 /* Attributes for GTP packet with Extension Header */
913 params->attr = ice_attr_gtpu_eh;
914 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
915 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
916 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
917 ice_and_bitmap(params->ptypes, params->ptypes,
918 src, ICE_FLOW_PTYPE_MAX);
919 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
920 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
921 ice_and_bitmap(params->ptypes, params->ptypes,
922 src, ICE_FLOW_PTYPE_MAX);
923 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
924 src = (const ice_bitmap_t *)ice_ptypes_esp;
925 ice_and_bitmap(params->ptypes, params->ptypes,
926 src, ICE_FLOW_PTYPE_MAX);
927 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
928 src = (const ice_bitmap_t *)ice_ptypes_ah;
929 ice_and_bitmap(params->ptypes, params->ptypes,
930 src, ICE_FLOW_PTYPE_MAX);
931 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
932 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
933 ice_and_bitmap(params->ptypes, params->ptypes,
934 src, ICE_FLOW_PTYPE_MAX);
937 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
938 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
940 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
943 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
945 ice_and_bitmap(params->ptypes, params->ptypes,
946 src, ICE_FLOW_PTYPE_MAX);
948 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
949 ice_andnot_bitmap(params->ptypes, params->ptypes,
950 src, ICE_FLOW_PTYPE_MAX);
952 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
953 ice_andnot_bitmap(params->ptypes, params->ptypes,
954 src, ICE_FLOW_PTYPE_MAX);
962 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
963 * @hw: pointer to the HW struct
964 * @params: information about the flow to be processed
965 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
967 * This function will allocate an extraction sequence entries for a DWORD size
968 * chunk of the packet flags.
970 static enum ice_status
971 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
972 struct ice_flow_prof_params *params,
973 enum ice_flex_mdid_pkt_flags flags)
975 u8 fv_words = hw->blk[params->blk].es.fvw;
978 /* Make sure the number of extraction sequence entries required does not
979 * exceed the block's capacity.
981 if (params->es_cnt >= fv_words)
982 return ICE_ERR_MAX_LIMIT;
984 /* some blocks require a reversed field vector layout */
985 if (hw->blk[params->blk].es.reverse)
986 idx = fv_words - params->es_cnt - 1;
988 idx = params->es_cnt;
990 params->es[idx].prot_id = ICE_PROT_META_ID;
991 params->es[idx].off = flags;
998 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
999 * @hw: pointer to the HW struct
1000 * @params: information about the flow to be processed
1001 * @seg: packet segment index of the field to be extracted
1002 * @fld: ID of field to be extracted
1003 * @match: bitfield of all fields
1005 * This function determines the protocol ID, offset, and size of the given
1006 * field. It then allocates one or more extraction sequence entries for the
1007 * given field, and fill the entries with protocol ID and offset information.
1009 static enum ice_status
1010 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1011 u8 seg, enum ice_flow_field fld, u64 match)
1013 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1014 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1015 u8 fv_words = hw->blk[params->blk].es.fvw;
1016 struct ice_flow_fld_info *flds;
1017 u16 cnt, ese_bits, i;
1022 flds = params->prof->segs[seg].fields;
1025 case ICE_FLOW_FIELD_IDX_ETH_DA:
1026 case ICE_FLOW_FIELD_IDX_ETH_SA:
1027 case ICE_FLOW_FIELD_IDX_S_VLAN:
1028 case ICE_FLOW_FIELD_IDX_C_VLAN:
1029 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1031 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1032 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1034 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1035 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1037 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1038 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1040 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1041 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1042 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1044 /* TTL and PROT share the same extraction seq. entry.
1045 * Each is considered a sibling to the other in terms of sharing
1046 * the same extraction sequence entry.
1048 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1049 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1050 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
1051 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1053 /* If the sibling field is also included, that field's
1054 * mask needs to be included.
1056 if (match & BIT(sib))
1057 sib_mask = ice_flds_info[sib].mask;
1059 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1060 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1061 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1063 /* TTL and PROT share the same extraction seq. entry.
1064 * Each is considered a sibling to the other in terms of sharing
1065 * the same extraction sequence entry.
1067 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1068 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1069 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
1070 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1072 /* If the sibling field is also included, that field's
1073 * mask needs to be included.
1075 if (match & BIT(sib))
1076 sib_mask = ice_flds_info[sib].mask;
1078 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1079 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1080 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1082 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1083 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1084 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1085 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1086 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1087 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1088 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1089 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1090 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1092 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1093 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1094 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1095 prot_id = ICE_PROT_TCP_IL;
1097 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1098 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1099 prot_id = ICE_PROT_UDP_IL_OR_S;
1101 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1102 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1103 prot_id = ICE_PROT_SCTP_IL;
1105 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1106 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1107 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1108 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1109 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1110 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1111 /* GTP is accessed through UDP OF protocol */
1112 prot_id = ICE_PROT_UDP_OF;
1114 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1115 prot_id = ICE_PROT_PPPOE;
1117 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1118 prot_id = ICE_PROT_UDP_IL_OR_S;
1120 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1121 prot_id = ICE_PROT_L2TPV3;
1123 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1124 prot_id = ICE_PROT_ESP_F;
1126 case ICE_FLOW_FIELD_IDX_AH_SPI:
1127 prot_id = ICE_PROT_ESP_2;
1129 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1130 prot_id = ICE_PROT_UDP_IL_OR_S;
1132 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1133 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1134 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1135 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1136 case ICE_FLOW_FIELD_IDX_ARP_OP:
1137 prot_id = ICE_PROT_ARP_OF;
1139 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1140 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1141 /* ICMP type and code share the same extraction seq. entry */
1142 prot_id = (params->prof->segs[seg].hdrs &
1143 ICE_FLOW_SEG_HDR_IPV4) ?
1144 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1145 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1146 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1147 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1149 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1150 prot_id = ICE_PROT_GRE_OF;
1153 return ICE_ERR_NOT_IMPL;
1156 /* Each extraction sequence entry is a word in size, and extracts a
1157 * word-aligned offset from a protocol header.
1159 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1161 flds[fld].xtrct.prot_id = prot_id;
1162 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1163 ICE_FLOW_FV_EXTRACT_SZ;
1164 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1165 flds[fld].xtrct.idx = params->es_cnt;
1166 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1168 /* Adjust the next field-entry index after accommodating the number of
1169 * entries this field consumes
1171 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1172 ice_flds_info[fld].size, ese_bits);
1174 /* Fill in the extraction sequence entries needed for this field */
1175 off = flds[fld].xtrct.off;
1176 mask = flds[fld].xtrct.mask;
1177 for (i = 0; i < cnt; i++) {
1178 /* Only consume an extraction sequence entry if there is no
1179 * sibling field associated with this field or the sibling entry
1180 * already extracts the word shared with this field.
1182 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1183 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1184 flds[sib].xtrct.off != off) {
1187 /* Make sure the number of extraction sequence required
1188 * does not exceed the block's capability
1190 if (params->es_cnt >= fv_words)
1191 return ICE_ERR_MAX_LIMIT;
1193 /* some blocks require a reversed field vector layout */
1194 if (hw->blk[params->blk].es.reverse)
1195 idx = fv_words - params->es_cnt - 1;
1197 idx = params->es_cnt;
1199 params->es[idx].prot_id = prot_id;
1200 params->es[idx].off = off;
1201 params->mask[idx] = mask | sib_mask;
1205 off += ICE_FLOW_FV_EXTRACT_SZ;
1212 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1213 * @hw: pointer to the HW struct
1214 * @params: information about the flow to be processed
1215 * @seg: index of packet segment whose raw fields are to be be extracted
1217 static enum ice_status
1218 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1225 if (!params->prof->segs[seg].raws_cnt)
1228 if (params->prof->segs[seg].raws_cnt >
1229 ARRAY_SIZE(params->prof->segs[seg].raws))
1230 return ICE_ERR_MAX_LIMIT;
1232 /* Offsets within the segment headers are not supported */
1233 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1235 return ICE_ERR_PARAM;
1237 fv_words = hw->blk[params->blk].es.fvw;
1239 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1240 struct ice_flow_seg_fld_raw *raw;
1243 raw = ¶ms->prof->segs[seg].raws[i];
1245 /* Storing extraction information */
1246 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1247 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1248 ICE_FLOW_FV_EXTRACT_SZ;
1249 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1251 raw->info.xtrct.idx = params->es_cnt;
1253 /* Determine the number of field vector entries this raw field
1256 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1257 (raw->info.src.last * BITS_PER_BYTE),
1258 (ICE_FLOW_FV_EXTRACT_SZ *
1260 off = raw->info.xtrct.off;
1261 for (j = 0; j < cnt; j++) {
1264 /* Make sure the number of extraction sequence required
1265 * does not exceed the block's capability
1267 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1268 params->es_cnt >= ICE_MAX_FV_WORDS)
1269 return ICE_ERR_MAX_LIMIT;
1271 /* some blocks require a reversed field vector layout */
1272 if (hw->blk[params->blk].es.reverse)
1273 idx = fv_words - params->es_cnt - 1;
1275 idx = params->es_cnt;
1277 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1278 params->es[idx].off = off;
1280 off += ICE_FLOW_FV_EXTRACT_SZ;
1288 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1289 * @hw: pointer to the HW struct
1290 * @params: information about the flow to be processed
1292 * This function iterates through all matched fields in the given segments, and
1293 * creates an extraction sequence for the fields.
1295 static enum ice_status
1296 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1297 struct ice_flow_prof_params *params)
1299 enum ice_status status = ICE_SUCCESS;
1302 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1305 if (params->blk == ICE_BLK_ACL) {
1306 status = ice_flow_xtract_pkt_flags(hw, params,
1307 ICE_RX_MDID_PKT_FLAGS_15_0);
1312 for (i = 0; i < params->prof->segs_cnt; i++) {
1313 u64 match = params->prof->segs[i].match;
1314 enum ice_flow_field j;
1316 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1317 const u64 bit = BIT_ULL(j);
1320 status = ice_flow_xtract_fld(hw, params, i, j,
1328 /* Process raw matching bytes */
1329 status = ice_flow_xtract_raws(hw, params, i);
1338 * ice_flow_sel_acl_scen - returns the specific scenario
1339 * @hw: pointer to the hardware structure
1340 * @params: information about the flow to be processed
1342 * This function will return the specific scenario based on the
1343 * params passed to it
1345 static enum ice_status
1346 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1348 /* Find the best-fit scenario for the provided match width */
1349 struct ice_acl_scen *cand_scen = NULL, *scen;
1352 return ICE_ERR_DOES_NOT_EXIST;
1354 /* Loop through each scenario and match against the scenario width
1355 * to select the specific scenario
1357 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1358 if (scen->eff_width >= params->entry_length &&
1359 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1362 return ICE_ERR_DOES_NOT_EXIST;
1364 params->prof->cfg.scen = cand_scen;
1370 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1371 * @params: information about the flow to be processed
1373 static enum ice_status
1374 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1376 u16 index, i, range_idx = 0;
1378 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1380 for (i = 0; i < params->prof->segs_cnt; i++) {
1381 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1382 u64 match = seg->match;
1385 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1386 struct ice_flow_fld_info *fld;
1387 const u64 bit = BIT_ULL(j);
1392 fld = &seg->fields[j];
1393 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1395 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1396 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1398 /* Range checking only supported for single
1401 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1403 BITS_PER_BYTE * 2) > 1)
1404 return ICE_ERR_PARAM;
1406 /* Ranges must define low and high values */
1407 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1408 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1409 return ICE_ERR_PARAM;
1411 fld->entry.val = range_idx++;
1413 /* Store adjusted byte-length of field for later
1414 * use, taking into account potential
1415 * non-byte-aligned displacement
1417 fld->entry.last = DIVIDE_AND_ROUND_UP
1418 (ice_flds_info[j].size +
1419 (fld->xtrct.disp % BITS_PER_BYTE),
1421 fld->entry.val = index;
1422 index += fld->entry.last;
1428 for (j = 0; j < seg->raws_cnt; j++) {
1429 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1431 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1432 raw->info.entry.val = index;
1433 raw->info.entry.last = raw->info.src.last;
1434 index += raw->info.entry.last;
1438 /* Currently only support using the byte selection base, which only
1439 * allows for an effective entry size of 30 bytes. Reject anything
1442 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1443 return ICE_ERR_PARAM;
1445 /* Only 8 range checkers per profile, reject anything trying to use
1448 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1449 return ICE_ERR_PARAM;
1451 /* Store # bytes required for entry for later use */
1452 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1458 * ice_flow_proc_segs - process all packet segments associated with a profile
1459 * @hw: pointer to the HW struct
1460 * @params: information about the flow to be processed
1462 static enum ice_status
1463 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1465 enum ice_status status;
1467 status = ice_flow_proc_seg_hdrs(params);
1471 status = ice_flow_create_xtrct_seq(hw, params);
1475 switch (params->blk) {
1478 status = ICE_SUCCESS;
1481 status = ice_flow_acl_def_entry_frmt(params);
1484 status = ice_flow_sel_acl_scen(hw, params);
1489 return ICE_ERR_NOT_IMPL;
1495 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1496 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1497 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1500 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1501 * @hw: pointer to the HW struct
1502 * @blk: classification stage
1503 * @dir: flow direction
1504 * @segs: array of one or more packet segments that describe the flow
1505 * @segs_cnt: number of packet segments provided
1506 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1507 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1509 static struct ice_flow_prof *
1510 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1511 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1512 u8 segs_cnt, u16 vsi_handle, u32 conds)
1514 struct ice_flow_prof *p, *prof = NULL;
1516 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1517 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1518 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1519 segs_cnt && segs_cnt == p->segs_cnt) {
1522 /* Check for profile-VSI association if specified */
1523 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1524 ice_is_vsi_valid(hw, vsi_handle) &&
1525 !ice_is_bit_set(p->vsis, vsi_handle))
1528 /* Protocol headers must be checked. Matched fields are
1529 * checked if specified.
1531 for (i = 0; i < segs_cnt; i++)
1532 if (segs[i].hdrs != p->segs[i].hdrs ||
1533 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1534 segs[i].match != p->segs[i].match))
1537 /* A match is found if all segments are matched */
1538 if (i == segs_cnt) {
1543 ice_release_lock(&hw->fl_profs_locks[blk]);
1549 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1550 * @hw: pointer to the HW struct
1551 * @blk: classification stage
1552 * @dir: flow direction
1553 * @segs: array of one or more packet segments that describe the flow
1554 * @segs_cnt: number of packet segments provided
1557 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1558 struct ice_flow_seg_info *segs, u8 segs_cnt)
1560 struct ice_flow_prof *p;
1562 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1563 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1565 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1569 * ice_flow_find_prof_id - Look up a profile with given profile ID
1570 * @hw: pointer to the HW struct
1571 * @blk: classification stage
1572 * @prof_id: unique ID to identify this flow profile
1574 static struct ice_flow_prof *
1575 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1577 struct ice_flow_prof *p;
1579 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1580 if (p->id == prof_id)
1587 * ice_dealloc_flow_entry - Deallocate flow entry memory
1588 * @hw: pointer to the HW struct
1589 * @entry: flow entry to be removed
1592 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1598 ice_free(hw, entry->entry);
1600 if (entry->range_buf) {
1601 ice_free(hw, entry->range_buf);
1602 entry->range_buf = NULL;
1606 ice_free(hw, entry->acts);
1608 entry->acts_cnt = 0;
1611 ice_free(hw, entry);
1614 #define ICE_ACL_INVALID_SCEN 0x3f
1617 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1618 * @hw: pointer to the hardware structure
1619 * @prof: pointer to flow profile
1620 * @buf: destination buffer function writes partial extraction sequence to
1622 * returns ICE_SUCCESS if no PF is associated to the given profile
1623 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1624 * returns other error code for real error
1626 static enum ice_status
1627 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1628 struct ice_aqc_acl_prof_generic_frmt *buf)
1630 enum ice_status status;
1633 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1637 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1641 /* If all PF's associated scenarios are all 0 or all
1642 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1643 * not been configured yet.
1645 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1646 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1647 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1648 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1651 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1652 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1653 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1654 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1655 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1656 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1657 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1658 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1661 return ICE_ERR_IN_USE;
1665 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1666 * @hw: pointer to the hardware structure
1667 * @acts: array of actions to be performed on a match
1668 * @acts_cnt: number of actions
1670 static enum ice_status
1671 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1676 for (i = 0; i < acts_cnt; i++) {
1677 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1678 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1679 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1680 struct ice_acl_cntrs cntrs;
1681 enum ice_status status;
1683 cntrs.bank = 0; /* Only bank0 for the moment */
1685 LE16_TO_CPU(acts[i].data.acl_act.value);
1687 LE16_TO_CPU(acts[i].data.acl_act.value);
1689 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1690 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1692 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1694 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1703 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1704 * @hw: pointer to the hardware structure
1705 * @prof: pointer to flow profile
1707 * Disassociate the scenario from the profile for the PF of the VSI.
1709 static enum ice_status
1710 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1712 struct ice_aqc_acl_prof_generic_frmt buf;
1713 enum ice_status status = ICE_SUCCESS;
1716 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1718 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1722 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1726 /* Clear scenario for this PF */
1727 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1728 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1734 * ice_flow_rem_entry_sync - Remove a flow entry
1735 * @hw: pointer to the HW struct
1736 * @blk: classification stage
1737 * @entry: flow entry to be removed
1739 static enum ice_status
1740 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1741 struct ice_flow_entry *entry)
1744 return ICE_ERR_BAD_PTR;
1746 if (blk == ICE_BLK_ACL) {
1747 enum ice_status status;
1750 return ICE_ERR_BAD_PTR;
1752 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1753 entry->scen_entry_idx);
1757 /* Checks if we need to release an ACL counter. */
1758 if (entry->acts_cnt && entry->acts)
1759 ice_flow_acl_free_act_cntr(hw, entry->acts,
1763 LIST_DEL(&entry->l_entry);
1765 ice_dealloc_flow_entry(hw, entry);
1771 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1772 * @hw: pointer to the HW struct
1773 * @blk: classification stage
1774 * @dir: flow direction
1775 * @prof_id: unique ID to identify this flow profile
1776 * @segs: array of one or more packet segments that describe the flow
1777 * @segs_cnt: number of packet segments provided
1778 * @acts: array of default actions
1779 * @acts_cnt: number of default actions
1780 * @prof: stores the returned flow profile added
1782 * Assumption: the caller has acquired the lock to the profile list
1784 static enum ice_status
1785 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1786 enum ice_flow_dir dir, u64 prof_id,
1787 struct ice_flow_seg_info *segs, u8 segs_cnt,
1788 struct ice_flow_action *acts, u8 acts_cnt,
1789 struct ice_flow_prof **prof)
1791 struct ice_flow_prof_params params;
1792 enum ice_status status;
1795 if (!prof || (acts_cnt && !acts))
1796 return ICE_ERR_BAD_PTR;
1798 ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
1799 params.prof = (struct ice_flow_prof *)
1800 ice_malloc(hw, sizeof(*params.prof));
1802 return ICE_ERR_NO_MEMORY;
1804 /* initialize extraction sequence to all invalid (0xff) */
1805 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1806 params.es[i].prot_id = ICE_PROT_INVALID;
1807 params.es[i].off = ICE_FV_OFFSET_INVAL;
1811 params.prof->id = prof_id;
1812 params.prof->dir = dir;
1813 params.prof->segs_cnt = segs_cnt;
1815 /* Make a copy of the segments that need to be persistent in the flow
1818 for (i = 0; i < segs_cnt; i++)
1819 ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs),
1820 ICE_NONDMA_TO_NONDMA);
1822 /* Make a copy of the actions that need to be persistent in the flow
1826 params.prof->acts = (struct ice_flow_action *)
1827 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1828 ICE_NONDMA_TO_NONDMA);
1830 if (!params.prof->acts) {
1831 status = ICE_ERR_NO_MEMORY;
1836 status = ice_flow_proc_segs(hw, ¶ms);
1838 ice_debug(hw, ICE_DBG_FLOW,
1839 "Error processing a flow's packet segments\n");
1843 /* Add a HW profile for this flow profile */
1844 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1845 params.attr, params.attr_cnt, params.es,
1848 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1852 INIT_LIST_HEAD(¶ms.prof->entries);
1853 ice_init_lock(¶ms.prof->entries_lock);
1854 *prof = params.prof;
1858 if (params.prof->acts)
1859 ice_free(hw, params.prof->acts);
1860 ice_free(hw, params.prof);
1867 * ice_flow_rem_prof_sync - remove a flow profile
1868 * @hw: pointer to the hardware structure
1869 * @blk: classification stage
1870 * @prof: pointer to flow profile to remove
1872 * Assumption: the caller has acquired the lock to the profile list
1874 static enum ice_status
1875 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1876 struct ice_flow_prof *prof)
1878 enum ice_status status;
1880 /* Remove all remaining flow entries before removing the flow profile */
1881 if (!LIST_EMPTY(&prof->entries)) {
1882 struct ice_flow_entry *e, *t;
1884 ice_acquire_lock(&prof->entries_lock);
/* SAFE iterator: ice_flow_rem_entry_sync() unlinks/frees each entry
 * while the list is walked.
 */
1886 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1888 status = ice_flow_rem_entry_sync(hw, blk, e);
1893 ice_release_lock(&prof->entries_lock);
/* ACL profiles carry extra HW state (scenario association and range
 * checkers) that must be torn down before removing the HW profile.
 */
1896 if (blk == ICE_BLK_ACL) {
1897 struct ice_aqc_acl_profile_ranges query_rng_buf;
1898 struct ice_aqc_acl_prof_generic_frmt buf;
1901 /* Disassociate the scenario from the profile for the PF */
1902 status = ice_flow_acl_disassoc_scen(hw, prof);
1906 /* Clear the range-checker if the profile ID is no longer
1909 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
/* ICE_ERR_IN_USE means other PFs still reference this profile;
 * only a clean "not in use" result clears the range checkers.
 */
1910 if (status && status != ICE_ERR_IN_USE) {
1912 } else if (!status) {
1913 /* Clear the range-checker value for profile ID */
1914 ice_memset(&query_rng_buf, 0,
1915 sizeof(struct ice_aqc_acl_profile_ranges),
1918 status = ice_flow_get_hw_prof(hw, blk, prof->id,
/* Program the zeroed buffer to disable all range checkers */
1923 status = ice_prog_acl_prof_ranges(hw, prof_id,
1924 &query_rng_buf, NULL);
1930 /* Remove all hardware profiles associated with this flow profile */
1931 status = ice_rem_prof(hw, blk, prof->id);
/* Unlink from the SW profile list and release SW resources */
1933 LIST_DEL(&prof->l_entry);
1934 ice_destroy_lock(&prof->entries_lock);
1936 ice_free(hw, prof->acts);
1944 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1945 * @buf: Destination buffer function writes partial xtrct sequence to
1946 * @info: Info about field
1949 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1950 struct ice_flow_fld_info *info)
/* Starting byte of this field inside the HW field vector: the
 * extraction-word index scaled to bytes, plus the bit displacement
 * converted to a byte offset.
 */
1955 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1956 info->xtrct.disp / BITS_PER_BYTE;
1957 dst = info->entry.val;
/* Copy one byte selector per entry byte (info->entry.last bytes) */
1958 for (i = 0; i < info->entry.last; i++)
1959 /* HW stores field vector words in LE, convert words back to BE
1960 * so constructed entries will end up in network order
/* XOR with 1 swaps the two bytes within each 16-bit FV word */
1962 buf->byte_selection[dst++] = src++ ^ 1;
1966 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1967 * @hw: pointer to the hardware structure
1968 * @prof: pointer to flow profile
1970 static enum ice_status
1971 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1973 struct ice_aqc_acl_prof_generic_frmt buf;
1974 struct ice_flow_fld_info *info;
1975 enum ice_status status;
1979 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1981 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* ICE_ERR_IN_USE is tolerated: it indicates another PF has already
 * programmed the profile-wide extraction sequence.
 */
1985 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1986 if (status && status != ICE_ERR_IN_USE)
1990 /* Program the profile dependent configuration. This is done
1991 * only once regardless of the number of PFs using that profile
1993 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1995 for (i = 0; i < prof->segs_cnt; i++) {
1996 struct ice_flow_seg_info *seg = &prof->segs[i];
1997 u64 match = seg->match;
/* Walk every matched-field bit in this segment; the loop can stop
 * early once 'match' has been fully consumed.
 */
2000 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2001 const u64 bit = BIT_ULL(j);
2006 info = &seg->fields[j];
/* Range-checked fields use the word selector; all others
 * are placed via the byte-selection sequence.
 */
2008 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2009 buf.word_selection[info->entry.val] =
2012 ice_flow_acl_set_xtrct_seq_fld(&buf,
/* Raw (offset/length) matches also contribute byte selectors */
2018 for (j = 0; j < seg->raws_cnt; j++) {
2019 info = &seg->raws[j].info;
2020 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Invalidate every PF's scenario slot first, then fill in ours */
2024 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2025 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2029 /* Update the current PF */
2030 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2031 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
2037 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2038 * @hw: pointer to the hardware structure
2039 * @blk: classification stage
2040 * @vsi_handle: software VSI handle
2041 * @vsig: target VSI group
2043 * Assumption: the caller has already verified that the VSI to
2044 * be added has the same characteristics as the VSIG and will
2045 * thereby have access to all resources added to that VSIG.
2048 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2051 enum ice_status status;
/* Reject invalid SW VSI handles and out-of-range blocks up front */
2053 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2054 return ICE_ERR_PARAM;
/* The per-block profile-list lock guards the VSIG update while the
 * SW VSI handle is translated to its HW VSI number.
 */
2056 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2057 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2059 ice_release_lock(&hw->fl_profs_locks[blk]);
2065 * ice_flow_assoc_prof - associate a VSI with a flow profile
2066 * @hw: pointer to the hardware structure
2067 * @blk: classification stage
2068 * @prof: pointer to flow profile
2069 * @vsi_handle: software VSI handle
2071 * Assumption: the caller has acquired the lock to the profile list
2072 * and the software VSI handle has been validated
2074 static enum ice_status
2075 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2076 struct ice_flow_prof *prof, u16 vsi_handle)
2078 enum ice_status status = ICE_SUCCESS;
/* No-op (success) if this VSI is already associated with the profile */
2080 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
/* ACL needs its extraction sequence programmed before the
 * profile/VSI association is made in HW.
 */
2081 if (blk == ICE_BLK_ACL) {
2082 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2086 status = ice_add_prof_id_flow(hw, blk,
2087 ice_get_hw_vsi_num(hw,
/* Track the association in the SW bitmap only on HW success */
2091 ice_set_bit(vsi_handle, prof->vsis);
2093 ice_debug(hw, ICE_DBG_FLOW,
2094 "HW profile add failed, %d\n",
2102 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2103 * @hw: pointer to the hardware structure
2104 * @blk: classification stage
2105 * @prof: pointer to flow profile
2106 * @vsi_handle: software VSI handle
2108 * Assumption: the caller has acquired the lock to the profile list
2109 * and the software VSI handle has been validated
2111 static enum ice_status
2112 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2113 struct ice_flow_prof *prof, u16 vsi_handle)
2115 enum ice_status status = ICE_SUCCESS;
/* Mirror of ice_flow_assoc_prof(): only act if the association exists */
2117 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2118 status = ice_rem_prof_id_flow(hw, blk,
2119 ice_get_hw_vsi_num(hw,
/* Clear the SW bitmap only after the HW removal succeeds */
2123 ice_clear_bit(vsi_handle, prof->vsis);
2125 ice_debug(hw, ICE_DBG_FLOW,
2126 "HW profile remove failed, %d\n",
2134 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2135 * @hw: pointer to the HW struct
2136 * @blk: classification stage
2137 * @dir: flow direction
2138 * @prof_id: unique ID to identify this flow profile
2139 * @segs: array of one or more packet segments that describe the flow
2140 * @segs_cnt: number of packet segments provided
2141 * @acts: array of default actions
2142 * @acts_cnt: number of default actions
2143 * @prof: stores the returned flow profile added
2146 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2147 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2148 struct ice_flow_action *acts, u8 acts_cnt,
2149 struct ice_flow_prof **prof)
2151 enum ice_status status;
/* Parameter validation before touching any shared state */
2153 if (segs_cnt > ICE_FLOW_SEG_MAX)
2154 return ICE_ERR_MAX_LIMIT;
2157 return ICE_ERR_PARAM;
2160 return ICE_ERR_BAD_PTR;
/* Validate the requested protocol-header sequence */
2162 status = ice_flow_val_hdrs(segs, segs_cnt);
2166 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2168 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2169 acts, acts_cnt, prof);
/* Only a successfully created profile is linked into the block list */
2171 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2173 ice_release_lock(&hw->fl_profs_locks[blk]);
2179 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2180 * @hw: pointer to the HW struct
2181 * @blk: the block for which the flow profile is to be removed
2182 * @prof_id: unique ID of the flow profile to be removed
2185 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2187 struct ice_flow_prof *prof;
2188 enum ice_status status;
/* Lookup and removal happen under the per-block profile-list lock */
2190 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2192 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2194 status = ICE_ERR_DOES_NOT_EXIST;
2198 /* prof becomes invalid after the call */
2199 status = ice_flow_rem_prof_sync(hw, blk, prof);
2202 ice_release_lock(&hw->fl_profs_locks[blk]);
2208 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2209 * @hw: pointer to the HW struct
2210 * @blk: classification stage
2211 * @prof_id: the profile ID handle
2212 * @hw_prof_id: pointer to variable to receive the HW profile ID
2215 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2218 struct ice_prof_map *map;
/* Translate the SW profile ID handle to its HW profile ID via the
 * profile map; fails if no mapping exists.
 */
2220 map = ice_search_prof_id(hw, blk, prof_id);
2222 *hw_prof_id = map->prof_id;
2226 return ICE_ERR_DOES_NOT_EXIST;
2230 * ice_flow_find_entry - look for a flow entry using its unique ID
2231 * @hw: pointer to the HW struct
2232 * @blk: classification stage
2233 * @entry_id: unique ID to identify this flow entry
2235 * This function looks for the flow entry with the specified unique ID in all
2236 * flow profiles of the specified classification stage. If the entry is found,
2237 * and it returns the handle to the flow entry. Otherwise, it returns
2238 * ICE_FLOW_ENTRY_ID_INVAL.
2240 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2242 struct ice_flow_entry *found = NULL;
2243 struct ice_flow_prof *p;
2245 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Linear scan: every profile of the block, then every entry of the
 * profile, until the ID matches.
 */
2247 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2248 struct ice_flow_entry *e;
/* Per-profile entries lock nests inside the block-level lock */
2250 ice_acquire_lock(&p->entries_lock);
2251 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2252 if (e->id == entry_id) {
2256 ice_release_lock(&p->entries_lock);
2262 ice_release_lock(&hw->fl_profs_locks[blk]);
2264 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2268 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2269 * @hw: pointer to the hardware structure
2270 * @acts: array of actions to be performed on a match
2271 * @acts_cnt: number of actions
2272 * @cnt_alloc: indicates if an ACL counter has been allocated.
2274 static enum ice_status
2275 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2276 u8 acts_cnt, bool *cnt_alloc)
2278 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2281 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2284 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2285 return ICE_ERR_OUT_OF_RANGE;
/* First pass: only NOP/DROP/CNTR_PKT/FWD_QUEUE action types are
 * permitted, and no action type may appear more than once.
 */
2287 for (i = 0; i < acts_cnt; i++) {
2288 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2289 acts[i].type != ICE_FLOW_ACT_DROP &&
2290 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2291 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2294 /* If the caller want to add two actions of the same type, then
2295 * it is considered invalid configuration.
2297 if (ice_test_and_set_bit(acts[i].type, dup_check))
2298 return ICE_ERR_PARAM;
2301 /* Checks if ACL counters are needed. */
/* Second pass: for counter actions, allocate a HW ACL counter in
 * bank 0 and rewrite the action's data with the counter index.
 */
2302 for (i = 0; i < acts_cnt; i++) {
2303 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2304 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2305 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2306 struct ice_acl_cntrs cntrs;
2307 enum ice_status status;
2310 cntrs.bank = 0; /* Only bank0 for the moment */
/* Packet+byte counting needs a dual counter pair */
2312 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2313 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2315 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2317 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2320 /* Counter index within the bank */
2321 acts[i].data.acl_act.value =
2322 CPU_TO_LE16(cntrs.first_cntr);
2331 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2332 * @fld: number of the given field
2333 * @info: info about field
2334 * @range_buf: range checker configuration buffer
2335 * @data: pointer to a data buffer containing flow entry's match values/masks
2336 * @range: Input/output param indicating which range checkers are being used
2339 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2340 struct ice_aqc_acl_profile_ranges *range_buf,
2341 u8 *data, u8 *range)
2345 /* If not specified, default mask is all bits in field */
/* NOTE(review): info->src.* appear to be byte offsets into the
 * caller's data buffer read as 16-bit values; each is shifted by
 * the extraction displacement to line up with the HW FV word --
 * confirm alignment requirements of 'data' before changing.
 */
2346 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2347 BIT(ice_flds_info[fld].size) - 1 :
2348 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2350 /* If the mask is 0, then we don't need to worry about this input
2351 * range checker value.
2355 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2357 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2358 u8 range_idx = info->entry.val;
/* Boundaries and mask are programmed big-endian for HW */
2360 range_buf->checker_cfg[range_idx].low_boundary =
2361 CPU_TO_BE16(new_low);
2362 range_buf->checker_cfg[range_idx].high_boundary =
2363 CPU_TO_BE16(new_high);
2364 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2366 /* Indicate which range checker is being used */
2367 *range |= BIT(range_idx);
2372 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2373 * @fld: number of the given field
2374 * @info: info about the field
2375 * @buf: buffer containing the entry
2376 * @dontcare: buffer containing don't care mask for entry
2377 * @data: pointer to a data buffer containing flow entry's match values/masks
2380 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2381 u8 *dontcare, u8 *data)
2383 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2384 bool use_mask = false;
2387 src = info->src.val;
2388 mask = info->src.mask;
/* Entry offsets are relative to the ACL byte-selection start index */
2389 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* Sub-byte shift of the field within its first destination byte */
2390 disp = info->xtrct.disp % BITS_PER_BYTE;
2392 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying bits displaced across byte
 * boundaries in tmp_s (data) and tmp_m (mask).
 */
2395 for (k = 0; k < info->entry.last; k++, dst++) {
2396 /* Add overflow bits from previous byte */
2397 buf[dst] = (tmp_s & 0xff00) >> 8;
2399 /* If mask is not valid, tmp_m is always zero, so just setting
2400 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2401 * overflow bits of mask from prev byte
2403 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2405 /* If there is displacement, last byte will only contain
2406 * displaced data, but there is no more data to read from user
2407 * buffer, so skip so as not to potentially read beyond end of
2410 if (!disp || k < info->entry.last - 1) {
2411 /* Store shifted data to use in next byte */
2412 tmp_s = data[src++] << disp;
2414 /* Add current (shifted) byte */
2415 buf[dst] |= tmp_s & 0xff;
2417 /* Handle mask if valid */
/* Don't-care bits are the inverse of the user's mask bits */
2419 tmp_m = (~data[mask++] & 0xff) << disp;
2420 dontcare[dst] |= tmp_m & 0xff;
2425 /* Fill in don't care bits at beginning of field */
2427 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2428 for (k = 0; k < disp; k++)
2429 dontcare[dst] |= BIT(k);
/* Bit position just past the field in its final byte */
2432 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2434 /* Fill in don't care bits at end of field */
2436 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2437 info->entry.last - 1;
2438 for (k = end_disp; k < BITS_PER_BYTE; k++)
2439 dontcare[dst] |= BIT(k);
2444 * ice_flow_acl_frmt_entry - Format ACL entry
2445 * @hw: pointer to the hardware structure
2446 * @prof: pointer to flow profile
2447 * @e: pointer to the flow entry
2448 * @data: pointer to a data buffer containing flow entry's match values/masks
2449 * @acts: array of actions to be performed on a match
2450 * @acts_cnt: number of actions
2452 * Formats the key (and key_inverse) to be matched from the data passed in,
2453 * along with data from the flow profile. This key/key_inverse pair makes up
2454 * the 'entry' for an ACL flow entry.
2456 static enum ice_status
2457 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2458 struct ice_flow_entry *e, u8 *data,
2459 struct ice_flow_action *acts, u8 acts_cnt)
2461 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2462 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2463 enum ice_status status;
2468 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2472 /* Format the result action */
/* Validates the action types and allocates HW counters if needed;
 * cnt_alloc records whether counters must be freed on failure.
 */
2474 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2478 status = ICE_ERR_NO_MEMORY;
/* The entry keeps its own copy of the action array */
2480 e->acts = (struct ice_flow_action *)
2481 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2482 ICE_NONDMA_TO_NONDMA);
2487 e->acts_cnt = acts_cnt;
2489 /* Format the matching data */
/* Scratch buffers are sized by the scenario's entry width */
2490 buf_sz = prof->cfg.scen->width;
2491 buf = (u8 *)ice_malloc(hw, buf_sz);
2495 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2499 /* 'key' buffer will store both key and key_inverse, so must be twice
2502 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2506 range_buf = (struct ice_aqc_acl_profile_ranges *)
2507 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2511 /* Set don't care mask to all 1's to start, will zero out used bytes */
2512 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2514 for (i = 0; i < prof->segs_cnt; i++) {
2515 struct ice_flow_seg_info *seg = &prof->segs[i];
2516 u64 match = seg->match;
/* Format each matched field of this segment into the key */
2519 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2520 struct ice_flow_fld_info *info;
2521 const u64 bit = BIT_ULL(j);
2526 info = &seg->fields[j];
/* Range fields go to the range-checker config; others
 * are written into buf/dontcare byte by byte.
 */
2528 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2529 ice_flow_acl_frmt_entry_range(j, info,
2533 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw matches are copied directly from the user data buffer */
2539 for (j = 0; j < seg->raws_cnt; j++) {
2540 struct ice_flow_fld_info *info = &seg->raws[j].info;
2541 u16 dst, src, mask, k;
2542 bool use_mask = false;
2544 src = info->src.val;
2545 dst = info->entry.val -
2546 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2547 mask = info->src.mask;
2549 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2552 for (k = 0; k < info->entry.last; k++, dst++) {
2553 buf[dst] = data[src++];
2555 dontcare[dst] = ~data[mask++];
/* Stamp the HW profile ID into the scenario's PID slot; it always
 * participates in the match (don't-care cleared to 0).
 */
2562 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2563 dontcare[prof->cfg.scen->pid_idx] = 0;
2565 /* Format the buffer for direction flags */
2566 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2568 if (prof->dir == ICE_FLOW_RX)
2569 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* Record which range checkers are in use for this entry */
2572 buf[prof->cfg.scen->rng_chk_idx] = range;
2573 /* Mark any unused range checkers as don't care */
2574 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2575 e->range_buf = range_buf;
2577 ice_free(hw, range_buf);
/* ice_set_key() produces the key / key_inverse pair from
 * buf + dontcare into the double-width 'key' buffer.
 */
2580 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2586 e->entry_sz = buf_sz * 2;
2593 ice_free(hw, dontcare);
/* Error unwind: release range buffer, action copy, and counters */
2598 if (status && range_buf) {
2599 ice_free(hw, range_buf);
2600 e->range_buf = NULL;
2603 if (status && e->acts) {
2604 ice_free(hw, e->acts);
2609 if (status && cnt_alloc)
2610 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2616 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2617 * the compared data.
2618 * @prof: pointer to flow profile
2619 * @e: pointer to the comparing flow entry
2620 * @do_chg_action: decide if we want to change the ACL action
2621 * @do_add_entry: decide if we want to add the new ACL entry
2622 * @do_rem_entry: decide if we want to remove the current ACL entry
2624 * Find an ACL scenario entry that matches the compared data. In the same time,
2625 * this function also figure out:
2626 * a/ If we want to change the ACL action
2627 * b/ If we want to add the new ACL entry
2628 * c/ If we want to remove the current ACL entry
2630 static struct ice_flow_entry *
2631 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2632 struct ice_flow_entry *e, bool *do_chg_action,
2633 bool *do_add_entry, bool *do_rem_entry)
2635 struct ice_flow_entry *p, *return_entry = NULL;
2639 * a/ There exists an entry with same matching data, but different
2640 * priority, then we remove this existing ACL entry. Then, we
2641 * will add the new entry to the ACL scenario.
2642 * b/ There exists an entry with same matching data, priority, and
2643 * result action, then we do nothing
2644 * c/ There exists an entry with same matching data, priority, but
2645 * different, action, then do only change the action's entry.
2646 * d/ Else, we add this new entry to the ACL scenario.
/* Defaults assume no matching entry exists (case d: plain add) */
2648 *do_chg_action = false;
2649 *do_add_entry = true;
2650 *do_rem_entry = false;
2651 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* Skip entries whose key/key_inverse data differs */
2652 if (memcmp(p->entry, e->entry, p->entry_sz))
2655 /* From this point, we have the same matching_data. */
2656 *do_add_entry = false;
2659 if (p->priority != e->priority) {
2660 /* matching data && !priority */
2661 *do_add_entry = true;
2662 *do_rem_entry = true;
2666 /* From this point, we will have matching_data && priority */
2667 if (p->acts_cnt != e->acts_cnt)
2668 *do_chg_action = true;
/* Pairwise action comparison: a differing action forces an action
 * update. NOTE(review): the inner memcmp() sets found_not_match on
 * the first byte-wise difference; confirm the intended pairing of
 * p->acts[i] vs e->acts[j] against the upstream base code.
 */
2669 for (i = 0; i < p->acts_cnt; i++) {
2670 bool found_not_match = false;
2672 for (j = 0; j < e->acts_cnt; j++)
2673 if (memcmp(&p->acts[i], &e->acts[j],
2674 sizeof(struct ice_flow_action))) {
2675 found_not_match = true;
2679 if (found_not_match) {
2680 *do_chg_action = true;
2685 /* (do_chg_action = true) means :
2686 * matching_data && priority && !result_action
2687 * (do_chg_action = false) means :
2688 * matching_data && priority && result_action
2693 return return_entry;
2697 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
/* Maps a generic flow priority to the corresponding ACL entry
 * priority level used by the ACL scenario code.
 */
2700 static enum ice_acl_entry_prior
2701 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2703 enum ice_acl_entry_prior acl_prior;
2706 case ICE_FLOW_PRIO_LOW:
2707 acl_prior = ICE_LOW;
2709 case ICE_FLOW_PRIO_NORMAL:
2710 acl_prior = ICE_NORMAL;
2712 case ICE_FLOW_PRIO_HIGH:
2713 acl_prior = ICE_HIGH;
/* Any unrecognized flow priority falls back to normal */
2716 acl_prior = ICE_NORMAL;
2724 * ice_flow_acl_union_rng_chk - Perform union operation between two
2725 * range-range checker buffers
2726 * @dst_buf: pointer to destination range checker buffer
2727 * @src_buf: pointer to source range checker buffer
2729 * For this function, we do the union between dst_buf and src_buf
2730 * range checker buffer, and we will save the result back to dst_buf
2732 static enum ice_status
2733 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2734 struct ice_aqc_acl_profile_ranges *src_buf)
2738 if (!dst_buf || !src_buf)
2739 return ICE_ERR_BAD_PTR;
2741 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2742 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2743 bool will_populate = false;
2745 in_data = &src_buf->checker_cfg[i];
/* A destination slot is usable if it is free (mask == 0) or already
 * holds an identical checker configuration (dedup).
 */
2750 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2751 cfg_data = &dst_buf->checker_cfg[j];
2753 if (!cfg_data->mask ||
2754 !memcmp(cfg_data, in_data,
2755 sizeof(struct ice_acl_rng_data))) {
2756 will_populate = true;
2761 if (will_populate) {
2762 ice_memcpy(cfg_data, in_data,
2763 sizeof(struct ice_acl_rng_data),
2764 ICE_NONDMA_TO_NONDMA);
2766 /* No available slot left to program range checker */
2767 return ICE_ERR_MAX_LIMIT;
2775 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2776 * @hw: pointer to the hardware structure
2777 * @prof: pointer to flow profile
2778 * @entry: double pointer to the flow entry
2780 * For this function, we will look at the current added entries in the
2781 * corresponding ACL scenario. Then, we will perform matching logic to
2782 * see if we want to add/modify/do nothing with this new entry.
2784 static enum ice_status
2785 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2786 struct ice_flow_entry **entry)
2788 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2789 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2790 struct ice_acl_act_entry *acts = NULL;
2791 struct ice_flow_entry *exist;
2792 enum ice_status status = ICE_SUCCESS;
2793 struct ice_flow_entry *e;
2796 if (!entry || !(*entry) || !prof)
2797 return ICE_ERR_BAD_PTR;
2801 do_chg_rng_chk = false;
2805 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2810 /* Query the current range-checker value in FW */
2811 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
/* Keep a working copy so the union can be diffed against FW state */
2815 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2816 sizeof(struct ice_aqc_acl_profile_ranges),
2817 ICE_NONDMA_TO_NONDMA);
2819 /* Generate the new range-checker value */
2820 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2824 /* Reconfigure the range check if the buffer is changed. */
2825 do_chg_rng_chk = false;
2826 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2827 sizeof(struct ice_aqc_acl_profile_ranges))) {
2828 status = ice_prog_acl_prof_ranges(hw, prof_id,
2829 &cfg_rng_buf, NULL);
2833 do_chg_rng_chk = true;
2837 /* Figure out if we want to (change the ACL action) and/or
2838 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2840 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2841 &do_add_entry, &do_rem_entry);
/* Same key with different priority: drop the stale entry first */
2844 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2849 /* Prepare the result action buffer */
/* NOTE(review): element count here is e->entry_sz even though only
 * e->acts_cnt entries are filled below -- looks like over-allocation;
 * also no NULL check on the ice_calloc() result is visible before the
 * copy loop. Confirm against the upstream base code before changing.
 */
2850 acts = (struct ice_acl_act_entry *)ice_calloc
2851 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2852 for (i = 0; i < e->acts_cnt; i++)
2853 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2854 sizeof(struct ice_acl_act_entry),
2855 ICE_NONDMA_TO_NONDMA);
2858 enum ice_acl_entry_prior prior;
/* Entry buffer layout: key followed by key_inverse */
2862 keys = (u8 *)e->entry;
2863 inverts = keys + (e->entry_sz / 2);
2864 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2866 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2867 inverts, acts, e->acts_cnt,
/* Remember the scenario slot and link the entry into the profile */
2872 e->scen_entry_idx = entry_idx;
2873 LIST_ADD(&e->l_entry, &prof->entries);
2875 if (do_chg_action) {
2876 /* For the action memory info, update the SW's copy of
2877 * exist entry with e's action memory info
2879 ice_free(hw, exist->acts);
2880 exist->acts_cnt = e->acts_cnt;
2881 exist->acts = (struct ice_flow_action *)
2882 ice_calloc(hw, exist->acts_cnt,
2883 sizeof(struct ice_flow_action));
2886 status = ICE_ERR_NO_MEMORY;
2890 ice_memcpy(exist->acts, e->acts,
2891 sizeof(struct ice_flow_action) * e->acts_cnt,
2892 ICE_NONDMA_TO_NONDMA);
/* Reprogram the HW action for the existing scenario slot */
2894 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2896 exist->scen_entry_idx);
2901 if (do_chg_rng_chk) {
2902 /* In this case, we want to update the range checker
2903 * information of the exist entry
2905 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2911 /* As we don't add the new entry to our SW DB, deallocate its
2912 * memories, and return the exist entry to the caller
2914 ice_dealloc_flow_entry(hw, e);
2925 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2926 * @hw: pointer to the hardware structure
2927 * @prof: pointer to flow profile
2928 * @e: double pointer to the flow entry
2930 static enum ice_status
2931 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2932 struct ice_flow_entry **e)
2934 enum ice_status status;
/* Serialize scenario updates against concurrent entry add/remove */
2936 ice_acquire_lock(&prof->entries_lock);
2937 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2938 ice_release_lock(&prof->entries_lock);
2944 * ice_flow_add_entry - Add a flow entry
2945 * @hw: pointer to the HW struct
2946 * @blk: classification stage
2947 * @prof_id: ID of the profile to add a new flow entry to
2948 * @entry_id: unique ID to identify this flow entry
2949 * @vsi_handle: software VSI handle for the flow entry
2950 * @prio: priority of the flow entry
2951 * @data: pointer to a data buffer containing flow entry's match values/masks
2952 * @acts: arrays of actions to be performed on a match
2953 * @acts_cnt: number of actions
2954 * @entry_h: pointer to buffer that receives the new flow entry's handle
2957 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2958 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2959 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2962 struct ice_flow_entry *e = NULL;
2963 struct ice_flow_prof *prof;
2964 enum ice_status status = ICE_SUCCESS;
2966 /* ACL entries must indicate an action */
2967 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2968 return ICE_ERR_PARAM;
2970 /* No flow entry data is expected for RSS */
2971 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2972 return ICE_ERR_BAD_PTR;
2974 if (!ice_is_vsi_valid(hw, vsi_handle))
2975 return ICE_ERR_PARAM;
2977 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2979 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2981 status = ICE_ERR_DOES_NOT_EXIST;
2983 /* Allocate memory for the entry being added and associate
2984 * the VSI to the found flow profile
2986 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2988 status = ICE_ERR_NO_MEMORY;
2990 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2993 ice_release_lock(&hw->fl_profs_locks[blk]);
2998 e->vsi_handle = vsi_handle;
3007 /* ACL will handle the entry management */
/* ACL path: format key/key_inverse + actions, then insert the
 * entry into the ACL scenario (may reuse an existing entry).
 */
3008 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3013 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Any other classification block is not supported by this path */
3019 status = ICE_ERR_NOT_IMPL;
3023 if (blk != ICE_BLK_ACL) {
3024 /* ACL will handle the entry management */
3025 ice_acquire_lock(&prof->entries_lock);
3026 LIST_ADD(&e->l_entry, &prof->entries);
3027 ice_release_lock(&prof->entries_lock);
/* Return the entry pointer encoded as an opaque handle */
3030 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3035 ice_free(hw, e->entry);
3043 * ice_flow_rem_entry - Remove a flow entry
3044 * @hw: pointer to the HW struct
3045 * @blk: classification stage
3046 * @entry_h: handle to the flow entry to be removed
3048 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3051 struct ice_flow_entry *entry;
3052 struct ice_flow_prof *prof;
3053 enum ice_status status = ICE_SUCCESS;
3055 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3056 return ICE_ERR_PARAM;
/* The handle is an encoded pointer to the entry itself */
3058 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3060 /* Retain the pointer to the flow profile as the entry will be freed */
3064 ice_acquire_lock(&prof->entries_lock);
3065 status = ice_flow_rem_entry_sync(hw, blk, entry);
3066 ice_release_lock(&prof->entries_lock);
3073 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3074 * @seg: packet segment the field being set belongs to
3075 * @fld: field to be set
3076 * @field_type: type of the field
3077 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3078 * entry's input buffer
3079 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3081 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3082 * entry's input buffer
3084 * This helper function stores information of a field being matched, including
3085 * the type of the field and the locations of the value to match, the mask, and
3086 * and the upper-bound value in the start of the input buffer for a flow entry.
3087 * This function should only be used for fixed-size data structures.
3089 * This function also opportunistically determines the protocol headers to be
3090 * present based on the fields being set. Some fields cannot be used alone to
3091 * determine the protocol headers present. Sometimes, fields for particular
3092 * protocol headers are not matched. In those cases, the protocol headers
3093 * must be explicitly set.
3096 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3097 enum ice_flow_fld_match_type field_type, u16 val_loc,
3098 u16 mask_loc, u16 last_loc)
3100 u64 bit = BIT_ULL(fld);
3103 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the field descriptor: its match type and the byte offsets
 * of value / mask / last within the entry's input buffer.
 */
3106 seg->fields[fld].type = field_type;
3107 seg->fields[fld].src.val = val_loc;
3108 seg->fields[fld].src.mask = mask_loc;
3109 seg->fields[fld].src.last = last_loc;
/* Note the protocol headers implied by this field */
3111 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3115 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3116 * @seg: packet segment the field being set belongs to
3117 * @fld: field to be set
3118 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3119 * entry's input buffer
3120 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3122 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3123 * entry's input buffer
3124 * @range: indicate if field being matched is to be in a range
3126 * This function specifies the locations, in the form of byte offsets from the
3127 * start of the input buffer for a flow entry, from where the value to match,
3128 * the mask value, and upper value can be extracted. These locations are then
3129 * stored in the flow profile. When adding a flow entry associated with the
3130 * flow profile, these locations will be used to quickly extract the values and
3131 * create the content of a match entry. This function should only be used for
3132 * fixed-size data structures.
3135 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3136 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Thin wrapper: choose RANGE vs REG match type, then delegate */
3138 enum ice_flow_fld_match_type t = range ?
3139 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3141 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3145 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3146 * @seg: packet segment the field being set belongs to
3147 * @fld: field to be set
3148 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3149 * entry's input buffer
3150 * @pref_loc: location of prefix value from entry's input buffer
3151 * @pref_sz: size of the location holding the prefix value
3153 * This function specifies the locations, in the form of byte offsets from the
3154 * start of the input buffer for a flow entry, from where the value to match
3155 * and the IPv4 prefix value can be extracted. These locations are then stored
3156 * in the flow profile. When adding flow entries to the associated flow profile,
3157 * these locations can be used to quickly extract the values to create the
3158 * content of a match entry. This function should only be used for fixed-size
3162 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3163 u16 val_loc, u16 pref_loc, u8 pref_sz)
3165 /* For this type of field, the "mask" location is for the prefix value's
3166 * location and the "last" location is for the size of the location of
/* Delegate with PREFIX match type; mask_loc carries the prefix
 * location and last_loc carries the prefix-location size.
 */
3169 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3170 pref_loc, (u16)pref_sz);
3174 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3175 * @seg: packet segment the field being set belongs to
3176 * @off: offset of the raw field from the beginning of the segment in bytes
3177 * @len: length of the raw pattern to be matched
3178 * @val_loc: location of the value to match from entry's input buffer
3179 * @mask_loc: location of mask value from entry's input buffer
3181 * This function specifies the offset of the raw field to be match from the
3182 * beginning of the specified packet segment, and the locations, in the form of
3183 * byte offsets from the start of the input buffer for a flow entry, from where
3184 * the value to match and the mask value to be extracted. These locations are
3185 * then stored in the flow profile. When adding flow entries to the associated
3186 * flow profile, these locations can be used to quickly extract the values to
3187 * create the content of a match entry. This function should only be used for
3188 * fixed-size data structures.
3191 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3192 u16 val_loc, u16 mask_loc)
3194 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3195 seg->raws[seg->raws_cnt].off = off;
3196 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3197 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3198 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3199 /* The "last" field is used to store the length of the field */
3200 seg->raws[seg->raws_cnt].info.src.last = len;
3203 /* Overflows of "raws" will be handled as an error condition later in
3204 * the flow when this information is processed.
/* Header-type bitmaps used to validate the composition of an RSS packet
 * segment: which L2/L3/L4 header bits may be combined in segs->hdrs.
 */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header types legal in an RSS segment */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3225 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3226 * @segs: pointer to the flow field segment(s)
3227 * @hash_fields: fields to be hashed on for the segment(s)
3228 * @flow_hdr: protocol header fields within a packet segment
3230 * Helper function to extract fields from hash bitmap and use flow
3231 * header value to set flow field segment for further use in flow
3232 * profile entry or removal.
3234 static enum ice_status
3235 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3238 u64 val = hash_fields;
3241 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3242 u64 bit = BIT_ULL(i);
3245 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3246 ICE_FLOW_FLD_OFF_INVAL,
3247 ICE_FLOW_FLD_OFF_INVAL,
3248 ICE_FLOW_FLD_OFF_INVAL, false);
3252 ICE_FLOW_SET_HDRS(segs, flow_hdr);
3254 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3255 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3256 return ICE_ERR_PARAM;
3258 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3259 if (val && !ice_is_pow2(val))
3262 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3263 if (val && !ice_is_pow2(val))
3270 * ice_rem_vsi_rss_list - remove VSI from RSS list
3271 * @hw: pointer to the hardware structure
3272 * @vsi_handle: software VSI handle
3274 * Remove the VSI from all RSS configurations in the list.
3276 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3278 struct ice_rss_cfg *r, *tmp;
3280 if (LIST_EMPTY(&hw->rss_list_head))
3283 ice_acquire_lock(&hw->rss_locks);
3284 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3285 ice_rss_cfg, l_entry)
3286 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3287 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3288 LIST_DEL(&r->l_entry);
3291 ice_release_lock(&hw->rss_locks);
3295 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3296 * @hw: pointer to the hardware structure
3297 * @vsi_handle: software VSI handle
3299 * This function will iterate through all flow profiles and disassociate
3300 * the VSI from that profile. If the flow profile has no VSIs it will
3303 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3305 const enum ice_block blk = ICE_BLK_RSS;
3306 struct ice_flow_prof *p, *t;
3307 enum ice_status status = ICE_SUCCESS;
3309 if (!ice_is_vsi_valid(hw, vsi_handle))
3310 return ICE_ERR_PARAM;
3312 if (LIST_EMPTY(&hw->fl_profs[blk]))
3315 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3316 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3318 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3319 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3323 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3324 status = ice_flow_rem_prof_sync(hw, blk, p);
3329 ice_release_lock(&hw->fl_profs_locks[blk]);
3335 * ice_rem_rss_list - remove RSS configuration from list
3336 * @hw: pointer to the hardware structure
3337 * @vsi_handle: software VSI handle
3338 * @prof: pointer to flow profile
3340 * Assumption: lock has already been acquired for RSS list
3343 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3345 struct ice_rss_cfg *r, *tmp;
3347 /* Search for RSS hash fields associated to the VSI that match the
3348 * hash configurations associated to the flow profile. If found
3349 * remove from the RSS entry list of the VSI context and delete entry.
3351 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3352 ice_rss_cfg, l_entry)
3353 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3354 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3355 ice_clear_bit(vsi_handle, r->vsis);
3356 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3357 LIST_DEL(&r->l_entry);
3365 * ice_add_rss_list - add RSS configuration to list
3366 * @hw: pointer to the hardware structure
3367 * @vsi_handle: software VSI handle
3368 * @prof: pointer to flow profile
3370 * Assumption: lock has already been acquired for RSS list
3372 static enum ice_status
3373 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3375 struct ice_rss_cfg *r, *rss_cfg;
3377 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3378 ice_rss_cfg, l_entry)
3379 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3380 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3381 ice_set_bit(vsi_handle, r->vsis);
3385 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3387 return ICE_ERR_NO_MEMORY;
3389 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3390 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3391 rss_cfg->symm = prof->cfg.symm;
3392 ice_set_bit(vsi_handle, rss_cfg->vsis);
3394 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit layout of the generated 64-bit RSS flow profile ID (see
 * ICE_FLOW_GEN_PROFID below): hash fields, protocol headers, encap flag.
 */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts used by the RSS add/remove paths: one segment addresses
 * outer headers, two segments address inner (tunneled) headers.
 */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3420 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3422 u32 s = ((src % 4) << 3); /* byte shift */
3423 u32 v = dst | 0x80; /* value to program */
3424 u8 i = src / 4; /* register index */
3427 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3428 reg = (reg & ~(0xff << s)) | (v << s);
3429 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3433 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3436 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3439 for (i = 0; i < len; i++) {
3440 ice_rss_config_xor_word(hw, prof_id,
3441 /* Yes, field vector in GLQF_HSYMM and
3442 * GLQF_HINSET is inversed!
3444 fv_last_word - (src + i),
3445 fv_last_word - (dst + i));
3446 ice_rss_config_xor_word(hw, prof_id,
3447 fv_last_word - (dst + i),
3448 fv_last_word - (src + i));
3453 ice_rss_update_symm(struct ice_hw *hw,
3454 struct ice_flow_prof *prof)
3456 struct ice_prof_map *map;
3459 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3460 prof_id = map->prof_id;
3462 /* clear to default */
3463 for (m = 0; m < 6; m++)
3464 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3465 if (prof->cfg.symm) {
3466 struct ice_flow_seg_info *seg =
3467 &prof->segs[prof->segs_cnt - 1];
3469 struct ice_flow_seg_xtrct *ipv4_src =
3470 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3471 struct ice_flow_seg_xtrct *ipv4_dst =
3472 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3473 struct ice_flow_seg_xtrct *ipv6_src =
3474 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3475 struct ice_flow_seg_xtrct *ipv6_dst =
3476 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3478 struct ice_flow_seg_xtrct *tcp_src =
3479 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3480 struct ice_flow_seg_xtrct *tcp_dst =
3481 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3483 struct ice_flow_seg_xtrct *udp_src =
3484 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3485 struct ice_flow_seg_xtrct *udp_dst =
3486 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3488 struct ice_flow_seg_xtrct *sctp_src =
3489 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3490 struct ice_flow_seg_xtrct *sctp_dst =
3491 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3494 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3495 ice_rss_config_xor(hw, prof_id,
3496 ipv4_src->idx, ipv4_dst->idx, 2);
3499 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3500 ice_rss_config_xor(hw, prof_id,
3501 ipv6_src->idx, ipv6_dst->idx, 8);
3504 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3505 ice_rss_config_xor(hw, prof_id,
3506 tcp_src->idx, tcp_dst->idx, 1);
3509 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3510 ice_rss_config_xor(hw, prof_id,
3511 udp_src->idx, udp_dst->idx, 1);
3514 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3515 ice_rss_config_xor(hw, prof_id,
3516 sctp_src->idx, sctp_dst->idx, 1);
3521 * ice_add_rss_cfg_sync - add an RSS configuration
3522 * @hw: pointer to the hardware structure
3523 * @vsi_handle: software VSI handle
3524 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3525 * @addl_hdrs: protocol header fields
3526 * @segs_cnt: packet segment count
3527 * @symm: symmetric hash enable/disable
3529 * Assumption: lock has already been acquired for RSS list
3531 static enum ice_status
3532 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3533 u32 addl_hdrs, u8 segs_cnt, bool symm)
3535 const enum ice_block blk = ICE_BLK_RSS;
3536 struct ice_flow_prof *prof = NULL;
3537 struct ice_flow_seg_info *segs;
3538 enum ice_status status;
3540 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3541 return ICE_ERR_PARAM;
3543 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3546 return ICE_ERR_NO_MEMORY;
3548 /* Construct the packet segment info from the hashed fields */
3549 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3554 /* Search for a flow profile that has matching headers, hash fields
3555 * and has the input VSI associated to it. If found, no further
3556 * operations required and exit.
3558 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3560 ICE_FLOW_FIND_PROF_CHK_FLDS |
3561 ICE_FLOW_FIND_PROF_CHK_VSI);
3563 if (prof->cfg.symm == symm)
3565 prof->cfg.symm = symm;
3569 /* Check if a flow profile exists with the same protocol headers and
3570 * associated with the input VSI. If so disassociate the VSI from
3571 * this profile. The VSI will be added to a new profile created with
3572 * the protocol header and new hash field configuration.
3574 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3575 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3577 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3579 ice_rem_rss_list(hw, vsi_handle, prof);
3583 /* Remove profile if it has no VSIs associated */
3584 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3585 status = ice_flow_rem_prof(hw, blk, prof->id);
3591 /* Search for a profile that has same match fields only. If this
3592 * exists then associate the VSI to this profile.
3594 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3596 ICE_FLOW_FIND_PROF_CHK_FLDS);
3598 if (prof->cfg.symm == symm) {
3599 status = ice_flow_assoc_prof(hw, blk, prof,
3602 status = ice_add_rss_list(hw, vsi_handle,
3605 /* if a profile exist but with different symmetric
3606 * requirement, just return error.
3608 status = ICE_ERR_NOT_SUPPORTED;
3613 /* Create a new flow profile with generated profile and packet
3614 * segment information.
3616 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3617 ICE_FLOW_GEN_PROFID(hashed_flds,
3618 segs[segs_cnt - 1].hdrs,
3620 segs, segs_cnt, NULL, 0, &prof);
3624 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3625 /* If association to a new flow profile failed then this profile can
3629 ice_flow_rem_prof(hw, blk, prof->id);
3633 status = ice_add_rss_list(hw, vsi_handle, prof);
3635 prof->cfg.symm = symm;
3638 ice_rss_update_symm(hw, prof);
3646 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3647 * @hw: pointer to the hardware structure
3648 * @vsi_handle: software VSI handle
3649 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3650 * @addl_hdrs: protocol header fields
3651 * @symm: symmetric hash enable/disable
3653 * This function will generate a flow profile based on fields associated with
3654 * the input fields to hash on, the flow type and use the VSI number to add
3655 * a flow entry to the profile.
3658 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3659 u32 addl_hdrs, bool symm)
3661 enum ice_status status;
3663 if (hashed_flds == ICE_HASH_INVALID ||
3664 !ice_is_vsi_valid(hw, vsi_handle))
3665 return ICE_ERR_PARAM;
3667 ice_acquire_lock(&hw->rss_locks);
3668 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3669 ICE_RSS_OUTER_HEADERS, symm);
3671 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3672 addl_hdrs, ICE_RSS_INNER_HEADERS,
3674 ice_release_lock(&hw->rss_locks);
3680 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3681 * @hw: pointer to the hardware structure
3682 * @vsi_handle: software VSI handle
3683 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3684 * @addl_hdrs: Protocol header fields within a packet segment
3685 * @segs_cnt: packet segment count
3687 * Assumption: lock has already been acquired for RSS list
3689 static enum ice_status
3690 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3691 u32 addl_hdrs, u8 segs_cnt)
3693 const enum ice_block blk = ICE_BLK_RSS;
3694 struct ice_flow_seg_info *segs;
3695 struct ice_flow_prof *prof;
3696 enum ice_status status;
3698 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3701 return ICE_ERR_NO_MEMORY;
3703 /* Construct the packet segment info from the hashed fields */
3704 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3709 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3711 ICE_FLOW_FIND_PROF_CHK_FLDS);
3713 status = ICE_ERR_DOES_NOT_EXIST;
3717 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3721 /* Remove RSS configuration from VSI context before deleting
3724 ice_rem_rss_list(hw, vsi_handle, prof);
3726 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3727 status = ice_flow_rem_prof(hw, blk, prof->id);
3735 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3736 * @hw: pointer to the hardware structure
3737 * @vsi_handle: software VSI handle
3738 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3739 * @addl_hdrs: Protocol header fields within a packet segment
3741 * This function will lookup the flow profile based on the input
3742 * hash field bitmap, iterate through the profile entry list of
3743 * that profile and find entry associated with input VSI to be
3744 * removed. Calls are made to underlying flow apis which will in
3745 * turn build or update buffers for RSS XLT1 section.
3748 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3751 enum ice_status status;
3753 if (hashed_flds == ICE_HASH_INVALID ||
3754 !ice_is_vsi_valid(hw, vsi_handle))
3755 return ICE_ERR_PARAM;
3757 ice_acquire_lock(&hw->rss_locks);
3758 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3759 ICE_RSS_OUTER_HEADERS);
3761 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3762 addl_hdrs, ICE_RSS_INNER_HEADERS);
3763 ice_release_lock(&hw->rss_locks);
3769 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3770 * @hw: pointer to the hardware structure
3771 * @vsi_handle: software VSI handle
3773 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3775 enum ice_status status = ICE_SUCCESS;
3776 struct ice_rss_cfg *r;
3778 if (!ice_is_vsi_valid(hw, vsi_handle))
3779 return ICE_ERR_PARAM;
3781 ice_acquire_lock(&hw->rss_locks);
3782 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3783 ice_rss_cfg, l_entry) {
3784 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3785 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3788 ICE_RSS_OUTER_HEADERS,
3792 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3795 ICE_RSS_INNER_HEADERS,
3801 ice_release_lock(&hw->rss_locks);
3807 * ice_get_rss_cfg - returns hashed fields for the given header types
3808 * @hw: pointer to the hardware structure
3809 * @vsi_handle: software VSI handle
3810 * @hdrs: protocol header type
3812 * This function will return the match fields of the first instance of flow
3813 * profile having the given header types and containing input VSI
3815 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3817 struct ice_rss_cfg *r, *rss_cfg = NULL;
3819 /* verify if the protocol header is non zero and VSI is valid */
3820 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3821 return ICE_HASH_INVALID;
3823 ice_acquire_lock(&hw->rss_locks);
3824 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3825 ice_rss_cfg, l_entry)
3826 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3827 r->packet_hdr == hdrs) {
3831 ice_release_lock(&hw->rss_locks);
3833 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;