1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_common.h"
/* Size of known protocol header fields, in BYTES.  These feed the
 * ICE_FLOW_FLD_INFO*() initializers below, which convert them to bits.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
/* IPv6 address prefixes of 32/48/64 bits (4/6/8 bytes) */
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36 enum ice_flow_seg_hdr hdr;
37 s16 off; /* Offset from start of a protocol header, in bits */
38 u16 size; /* Size of fields in bits */
39 u16 mask; /* 16-bit mask for field */
/* Build an ice_flow_field_info initializer.  Offset and size are given in
 * bytes for readability and stored in bits; no per-field match mask.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

/* Same as ICE_FLOW_FLD_INFO() but with an explicit 16-bit match mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
56 /* Table containing properties of supported protocol header fields */
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
60 /* ICE_FLOW_FIELD_IDX_ETH_DA */
61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62 /* ICE_FLOW_FIELD_IDX_ETH_SA */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64 /* ICE_FLOW_FIELD_IDX_S_VLAN */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66 /* ICE_FLOW_FIELD_IDX_C_VLAN */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
71 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
74 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
77 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
116 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
131 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139 /* ICE_FLOW_FIELD_IDX_ARP_OP */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
142 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
147 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
150 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152 ICE_FLOW_FLD_SZ_GTP_TEID),
153 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155 ICE_FLOW_FLD_SZ_GTP_TEID),
156 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158 ICE_FLOW_FLD_SZ_GTP_TEID),
159 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164 ICE_FLOW_FLD_SZ_GTP_TEID),
165 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167 ICE_FLOW_FLD_SZ_GTP_TEID),
169 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
173 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175 ICE_FLOW_FLD_SZ_PFCP_SEID),
177 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
181 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183 ICE_FLOW_FLD_SZ_ESP_SPI),
185 /* ICE_FLOW_FIELD_IDX_AH_SPI */
186 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187 ICE_FLOW_FLD_SZ_AH_SPI),
189 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194 /* Bitmaps indicating relevant packet types for a particular protocol header
196 * Packet types for packets with an Outer/First/Single MAC header
198 static const u32 ice_ptypes_mac_ofos[] = {
199 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
203 0x00000000, 0x00000000, 0x00000000, 0x00000000,
204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213 0x00000000, 0x00000000, 0x00000000, 0x00000000,
214 0x00000000, 0x00000000, 0x00000000, 0x00000000,
215 0x00000000, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
222 * include IPV4 other PTYPEs
224 static const u32 ice_ptypes_ipv4_ofos[] = {
225 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
226 0x00000000, 0x00000155, 0x00000000, 0x00000000,
227 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
238 static const u32 ice_ptypes_ipv4_ofos_all[] = {
239 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
240 0x00000000, 0x00000155, 0x00000000, 0x00000000,
241 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 /* Packet types for packets with an Innermost/Last IPv4 header */
250 static const u32 ice_ptypes_ipv4_il[] = {
251 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
252 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
261 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
262 * include IVP6 other PTYPEs
264 static const u32 ice_ptypes_ipv6_ofos[] = {
265 0x00000000, 0x00000000, 0x77000000, 0x10002000,
266 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
267 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
278 static const u32 ice_ptypes_ipv6_ofos_all[] = {
279 0x00000000, 0x00000000, 0x77000000, 0x10002000,
280 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
281 0x00000000, 0x03F00000, 0x7C1F0000, 0x00000206,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 0x00000000, 0x00000000, 0x00000000, 0x00000000,
289 /* Packet types for packets with an Innermost/Last IPv6 header */
290 static const u32 ice_ptypes_ipv6_il[] = {
291 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
292 0x00000770, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 0x00000000, 0x00000000, 0x00000000, 0x00000000,
297 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 0x00000000, 0x00000000, 0x00000000, 0x00000000,
301 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
302 static const u32 ice_ipv4_ofos_no_l4[] = {
303 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x000cc000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
308 0x00000000, 0x00000000, 0x00000000, 0x00000000,
309 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
314 static const u32 ice_ipv4_il_no_l4[] = {
315 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
316 0x00000008, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00139800, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 0x00000000, 0x00000000, 0x00000000, 0x00000000,
321 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
326 static const u32 ice_ipv6_ofos_no_l4[] = {
327 0x00000000, 0x00000000, 0x43000000, 0x10002000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x02300000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
338 static const u32 ice_ipv6_il_no_l4[] = {
339 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
340 0x00000430, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 /* Packet types for packets with an Outermost/First ARP header */
350 static const u32 ice_ptypes_arp_of[] = {
351 0x00000800, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 /* UDP Packet types for non-tunneled packets or tunneled
362 * packets with inner UDP.
364 static const u32 ice_ptypes_udp_il[] = {
365 0x81000000, 0x20204040, 0x04000010, 0x80810102,
366 0x00000040, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00410000, 0x90842000, 0x00000007,
368 0x00000000, 0x00000000, 0x00000000, 0x00000000,
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000000, 0x00000000,
372 0x00000000, 0x00000000, 0x00000000, 0x00000000,
375 /* Packet types for packets with an Innermost/Last TCP header */
376 static const u32 ice_ptypes_tcp_il[] = {
377 0x04000000, 0x80810102, 0x10000040, 0x02040408,
378 0x00000102, 0x00000000, 0x00000000, 0x00000000,
379 0x00000000, 0x00820000, 0x21084000, 0x00000000,
380 0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 0x00000000, 0x00000000, 0x00000000, 0x00000000,
387 /* Packet types for packets with an Innermost/Last SCTP header */
388 static const u32 ice_ptypes_sctp_il[] = {
389 0x08000000, 0x01020204, 0x20000081, 0x04080810,
390 0x00000204, 0x00000000, 0x00000000, 0x00000000,
391 0x00000000, 0x01040000, 0x00000000, 0x00000000,
392 0x00000000, 0x00000000, 0x00000000, 0x00000000,
393 0x00000000, 0x00000000, 0x00000000, 0x00000000,
394 0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
399 /* Packet types for packets with an Outermost/First ICMP header */
400 static const u32 ice_ptypes_icmp_of[] = {
401 0x10000000, 0x00000000, 0x00000000, 0x00000000,
402 0x00000000, 0x00000000, 0x00000000, 0x00000000,
403 0x00000000, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x00000000, 0x00000000, 0x00000000,
405 0x00000000, 0x00000000, 0x00000000, 0x00000000,
406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 0x00000000, 0x00000000, 0x00000000, 0x00000000,
411 /* Packet types for packets with an Innermost/Last ICMP header */
412 static const u32 ice_ptypes_icmp_il[] = {
413 0x00000000, 0x02040408, 0x40000102, 0x08101020,
414 0x00000408, 0x00000000, 0x00000000, 0x00000000,
415 0x00000000, 0x00000000, 0x42108000, 0x00000000,
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 0x00000000, 0x00000000, 0x00000000, 0x00000000,
418 0x00000000, 0x00000000, 0x00000000, 0x00000000,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
423 /* Packet types for packets with an Outermost/First GRE header */
424 static const u32 ice_ptypes_gre_of[] = {
425 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
426 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
427 0x00000000, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
430 0x00000000, 0x00000000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
435 /* Packet types for packets with an Innermost/Last MAC header */
436 static const u32 ice_ptypes_mac_il[] = {
437 0x00000000, 0x00000000, 0x00000000, 0x00000000,
438 0x00000000, 0x00000000, 0x00000000, 0x00000000,
439 0x00000000, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 /* Packet types for GTPC */
448 static const u32 ice_ptypes_gtpc[] = {
449 0x00000000, 0x00000000, 0x00000000, 0x00000000,
450 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 0x00000000, 0x00000000, 0x00000180, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
459 /* Packet types for GTPC with TEID */
460 static const u32 ice_ptypes_gtpc_tid[] = {
461 0x00000000, 0x00000000, 0x00000000, 0x00000000,
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000060, 0x00000000,
464 0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
471 /* Packet types for GTPU */
472 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
473 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
474 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
475 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
476 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
477 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
478 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
479 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
480 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
481 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
482 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
483 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
484 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
485 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
486 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
487 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
488 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
489 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
490 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
491 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
492 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
495 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
496 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
497 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
498 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
499 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
500 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
501 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
502 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
503 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
504 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
505 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
506 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
507 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
508 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
509 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
510 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
511 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
512 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
513 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
514 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
515 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
518 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
519 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
520 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
521 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
522 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
523 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
524 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
525 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
526 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
527 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
528 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
529 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
530 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
531 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
532 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
533 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
534 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
535 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
536 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
537 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
538 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
541 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
542 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
543 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
544 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
545 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
546 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
547 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
548 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
549 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
550 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
551 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
552 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
553 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
554 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
555 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
556 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
557 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
558 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
559 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
560 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
561 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
564 static const u32 ice_ptypes_gtpu[] = {
565 0x00000000, 0x00000000, 0x00000000, 0x00000000,
566 0x00000000, 0x00000000, 0x00000000, 0x00000000,
567 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
568 0x00000000, 0x00000000, 0x00000000, 0x00000000,
569 0x00000000, 0x00000000, 0x00000000, 0x00000000,
570 0x00000000, 0x00000000, 0x00000000, 0x00000000,
571 0x00000000, 0x00000000, 0x00000000, 0x00000000,
572 0x00000000, 0x00000000, 0x00000000, 0x00000000,
575 /* Packet types for pppoe */
576 static const u32 ice_ptypes_pppoe[] = {
577 0x00000000, 0x00000000, 0x00000000, 0x00000000,
578 0x00000000, 0x00000000, 0x00000000, 0x00000000,
579 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
580 0x00000000, 0x00000000, 0x00000000, 0x00000000,
581 0x00000000, 0x00000000, 0x00000000, 0x00000000,
582 0x00000000, 0x00000000, 0x00000000, 0x00000000,
583 0x00000000, 0x00000000, 0x00000000, 0x00000000,
584 0x00000000, 0x00000000, 0x00000000, 0x00000000,
587 /* Packet types for packets with PFCP NODE header */
588 static const u32 ice_ptypes_pfcp_node[] = {
589 0x00000000, 0x00000000, 0x00000000, 0x00000000,
590 0x00000000, 0x00000000, 0x00000000, 0x00000000,
591 0x00000000, 0x00000000, 0x80000000, 0x00000002,
592 0x00000000, 0x00000000, 0x00000000, 0x00000000,
593 0x00000000, 0x00000000, 0x00000000, 0x00000000,
594 0x00000000, 0x00000000, 0x00000000, 0x00000000,
595 0x00000000, 0x00000000, 0x00000000, 0x00000000,
596 0x00000000, 0x00000000, 0x00000000, 0x00000000,
599 /* Packet types for packets with PFCP SESSION header */
600 static const u32 ice_ptypes_pfcp_session[] = {
601 0x00000000, 0x00000000, 0x00000000, 0x00000000,
602 0x00000000, 0x00000000, 0x00000000, 0x00000000,
603 0x00000000, 0x00000000, 0x00000000, 0x00000005,
604 0x00000000, 0x00000000, 0x00000000, 0x00000000,
605 0x00000000, 0x00000000, 0x00000000, 0x00000000,
606 0x00000000, 0x00000000, 0x00000000, 0x00000000,
607 0x00000000, 0x00000000, 0x00000000, 0x00000000,
608 0x00000000, 0x00000000, 0x00000000, 0x00000000,
611 /* Packet types for l2tpv3 */
612 static const u32 ice_ptypes_l2tpv3[] = {
613 0x00000000, 0x00000000, 0x00000000, 0x00000000,
614 0x00000000, 0x00000000, 0x00000000, 0x00000000,
615 0x00000000, 0x00000000, 0x00000000, 0x00000300,
616 0x00000000, 0x00000000, 0x00000000, 0x00000000,
617 0x00000000, 0x00000000, 0x00000000, 0x00000000,
618 0x00000000, 0x00000000, 0x00000000, 0x00000000,
619 0x00000000, 0x00000000, 0x00000000, 0x00000000,
620 0x00000000, 0x00000000, 0x00000000, 0x00000000,
623 /* Packet types for esp */
624 static const u32 ice_ptypes_esp[] = {
625 0x00000000, 0x00000000, 0x00000000, 0x00000000,
626 0x00000000, 0x00000003, 0x00000000, 0x00000000,
627 0x00000000, 0x00000000, 0x00000000, 0x00000000,
628 0x00000000, 0x00000000, 0x00000000, 0x00000000,
629 0x00000000, 0x00000000, 0x00000000, 0x00000000,
630 0x00000000, 0x00000000, 0x00000000, 0x00000000,
631 0x00000000, 0x00000000, 0x00000000, 0x00000000,
632 0x00000000, 0x00000000, 0x00000000, 0x00000000,
635 /* Packet types for ah */
636 static const u32 ice_ptypes_ah[] = {
637 0x00000000, 0x00000000, 0x00000000, 0x00000000,
638 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
639 0x00000000, 0x00000000, 0x00000000, 0x00000000,
640 0x00000000, 0x00000000, 0x00000000, 0x00000000,
641 0x00000000, 0x00000000, 0x00000000, 0x00000000,
642 0x00000000, 0x00000000, 0x00000000, 0x00000000,
643 0x00000000, 0x00000000, 0x00000000, 0x00000000,
644 0x00000000, 0x00000000, 0x00000000, 0x00000000,
647 /* Packet types for packets with NAT_T ESP header */
648 static const u32 ice_ptypes_nat_t_esp[] = {
649 0x00000000, 0x00000000, 0x00000000, 0x00000000,
650 0x00000000, 0x00000030, 0x00000000, 0x00000000,
651 0x00000000, 0x00000000, 0x00000000, 0x00000000,
652 0x00000000, 0x00000000, 0x00000000, 0x00000000,
653 0x00000000, 0x00000000, 0x00000000, 0x00000000,
654 0x00000000, 0x00000000, 0x00000000, 0x00000000,
655 0x00000000, 0x00000000, 0x00000000, 0x00000000,
656 0x00000000, 0x00000000, 0x00000000, 0x00000000,
659 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
660 0x00000846, 0x00000000, 0x00000000, 0x00000000,
661 0x00000000, 0x00000000, 0x00000000, 0x00000000,
662 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
663 0x00000000, 0x00000000, 0x00000000, 0x00000000,
664 0x00000000, 0x00000000, 0x00000000, 0x00000000,
665 0x00000000, 0x00000000, 0x00000000, 0x00000000,
666 0x00000000, 0x00000000, 0x00000000, 0x00000000,
667 0x00000000, 0x00000000, 0x00000000, 0x00000000,
670 /* Manage parameters and info. used during the creation of a flow profile */
671 struct ice_flow_prof_params {
673 u16 entry_length; /* # of bytes formatted entry will require */
675 struct ice_flow_prof *prof;
677 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
678 * This will give us the direction flags.
680 struct ice_fv_word es[ICE_MAX_FV_WORDS];
681 /* attributes can be used to add attributes to a particular PTYPE */
682 const struct ice_ptype_attributes *attr;
685 u16 mask[ICE_MAX_FV_WORDS];
686 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Header types that, when present, select the inner headers for processing
 * (tunnel/overlay protocols) — presumably consumed by the RSS segment-count
 * logic; confirm at call sites.
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* All supported L2 header types */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All supported L3 header types; a valid segment selects at most one */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	ICE_FLOW_SEG_HDR_ARP)
/* All supported L4 header types; a valid segment selects at most one */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
709 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
710 * @segs: array of one or more packet segments that describe the flow
711 * @segs_cnt: number of packet segments provided
713 static enum ice_status
714 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
718 for (i = 0; i < segs_cnt; i++) {
719 /* Multiple L3 headers */
720 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
721 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
722 return ICE_ERR_PARAM;
724 /* Multiple L4 headers */
725 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
726 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
727 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20	/* no IP options */
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40	/* fixed IPv6 header only */
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20	/* no TCP options */
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
745 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
746 * @params: information about the flow to be processed
747 * @seg: index of packet segment whose header size is to be determined
/* Sums the fixed header sizes (L2 + L3 + L4) implied by the segment's hdrs
 * bitmap. Each of the L3 and L4 chains is mutually exclusive (else-if), so
 * at most one header size per layer is added.
 */
749 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2: MAC, optionally with a VLAN tag */
754 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
755 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3: IPv4, IPv6 or ARP */
758 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
759 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
760 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
761 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
762 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
763 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
764 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
765 /* A L3 header is required if L4 is specified */
/* L4: ICMP, TCP, UDP or SCTP */
769 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
770 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
771 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
772 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
773 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
774 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
775 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
776 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
782 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
783 * @params: information about the flow to be processed
785 * This function identifies the packet types associated with the protocol
786 * headers being present in packet segments of the specified flow profile.
/* Starts from an all-ones ptype bitmap and repeatedly ANDs it with the
 * per-protocol ptype tables for every header present in every segment,
 * leaving only the packet types common to all requested headers. The
 * "!i" / "i" selections pick outer-of-segment (ofos/of) vs. inner (il)
 * tables for the first vs. subsequent segments.
 */
788 static enum ice_status
789 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
791 struct ice_flow_prof *prof;
/* Begin with every ptype allowed; each matching header narrows the set */
794 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
799 for (i = 0; i < params->prof->segs_cnt; i++) {
800 const ice_bitmap_t *src;
803 hdrs = prof->segs[i].hdrs;
805 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
806 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
807 (const ice_bitmap_t *)ice_ptypes_mac_il;
808 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* VLAN only has an inner-layer table; outer VLAN (i == 0) is skipped */
812 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
813 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
814 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* ARP is only matched on the outermost segment */
818 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
819 ice_and_bitmap(params->ptypes, params->ptypes,
820 (const ice_bitmap_t *)ice_ptypes_arp_of,
824 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
825 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
826 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* L3 selection: IPV_OTHER variants first, then no-L4 groups, then the
 * generic IPv4/IPv6 tables; the chains below are mutually exclusive.
 */
829 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
830 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
832 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
833 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
834 ice_and_bitmap(params->ptypes, params->ptypes, src,
836 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
837 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
839 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
840 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
841 ice_and_bitmap(params->ptypes, params->ptypes, src,
843 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
844 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
845 src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
846 (const ice_bitmap_t *)ice_ipv4_il_no_l4;
847 ice_and_bitmap(params->ptypes, params->ptypes, src,
849 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
850 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
851 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
852 ice_and_bitmap(params->ptypes, params->ptypes, src,
854 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
855 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
856 src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
857 (const ice_bitmap_t *)ice_ipv6_il_no_l4;
858 ice_and_bitmap(params->ptypes, params->ptypes, src,
860 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
861 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
862 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
863 ice_and_bitmap(params->ptypes, params->ptypes, src,
867 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
868 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
869 ice_and_bitmap(params->ptypes, params->ptypes,
870 src, ICE_FLOW_PTYPE_MAX);
871 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
872 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
873 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* PPPoE not requested: exclude all PPPoE ptypes from the set */
876 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
877 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
/* L4 selection (UDP/TCP/SCTP are mutually exclusive) */
881 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
882 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
883 ice_and_bitmap(params->ptypes, params->ptypes, src,
885 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
886 ice_and_bitmap(params->ptypes, params->ptypes,
887 (const ice_bitmap_t *)ice_ptypes_tcp_il,
889 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
890 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
891 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* Tunnel / upper protocols; GTPU variants also select the attribute
 * table used later to distinguish uplink/downlink/EH packets.
 */
895 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
896 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
897 (const ice_bitmap_t *)ice_ptypes_icmp_il;
898 ice_and_bitmap(params->ptypes, params->ptypes, src,
900 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
902 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
903 ice_and_bitmap(params->ptypes, params->ptypes,
904 src, ICE_FLOW_PTYPE_MAX);
906 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
907 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
908 ice_and_bitmap(params->ptypes, params->ptypes,
909 src, ICE_FLOW_PTYPE_MAX);
910 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
911 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
912 ice_and_bitmap(params->ptypes, params->ptypes,
913 src, ICE_FLOW_PTYPE_MAX);
914 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
915 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
916 ice_and_bitmap(params->ptypes, params->ptypes,
917 src, ICE_FLOW_PTYPE_MAX);
919 /* Attributes for GTP packet with downlink */
920 params->attr = ice_attr_gtpu_down;
921 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
922 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
923 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
924 ice_and_bitmap(params->ptypes, params->ptypes,
925 src, ICE_FLOW_PTYPE_MAX);
927 /* Attributes for GTP packet with uplink */
928 params->attr = ice_attr_gtpu_up;
929 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
930 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
931 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
932 ice_and_bitmap(params->ptypes, params->ptypes,
933 src, ICE_FLOW_PTYPE_MAX);
935 /* Attributes for GTP packet with Extension Header */
936 params->attr = ice_attr_gtpu_eh;
937 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
938 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
939 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
940 ice_and_bitmap(params->ptypes, params->ptypes,
941 src, ICE_FLOW_PTYPE_MAX);
943 /* Attributes for GTP packet without Extension Header */
944 params->attr = ice_attr_gtpu_session;
945 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
946 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
947 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
948 ice_and_bitmap(params->ptypes, params->ptypes,
949 src, ICE_FLOW_PTYPE_MAX);
950 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
951 src = (const ice_bitmap_t *)ice_ptypes_esp;
952 ice_and_bitmap(params->ptypes, params->ptypes,
953 src, ICE_FLOW_PTYPE_MAX);
954 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
955 src = (const ice_bitmap_t *)ice_ptypes_ah;
956 ice_and_bitmap(params->ptypes, params->ptypes,
957 src, ICE_FLOW_PTYPE_MAX);
958 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
959 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
960 ice_and_bitmap(params->ptypes, params->ptypes,
961 src, ICE_FLOW_PTYPE_MAX);
/* PFCP: pick node vs. session table; otherwise exclude both */
964 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
965 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
967 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
970 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
972 ice_and_bitmap(params->ptypes, params->ptypes,
973 src, ICE_FLOW_PTYPE_MAX);
975 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
976 ice_andnot_bitmap(params->ptypes, params->ptypes,
977 src, ICE_FLOW_PTYPE_MAX);
979 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
980 ice_andnot_bitmap(params->ptypes, params->ptypes,
981 src, ICE_FLOW_PTYPE_MAX);
989 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
990 * @hw: pointer to the HW struct
991 * @params: information about the flow to be processed
992 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
994 * This function will allocate an extraction sequence entries for a DWORD size
995 * chunk of the packet flags.
997 static enum ice_status
998 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
999 struct ice_flow_prof_params *params,
1000 enum ice_flex_mdid_pkt_flags flags)
/* fvw = number of field-vector words available in this block */
1002 u8 fv_words = hw->blk[params->blk].es.fvw;
1005 /* Make sure the number of extraction sequence entries required does not
1006 * exceed the block's capacity.
1008 if (params->es_cnt >= fv_words)
1009 return ICE_ERR_MAX_LIMIT;
1011 /* some blocks require a reversed field vector layout */
1012 if (hw->blk[params->blk].es.reverse)
1013 idx = fv_words - params->es_cnt - 1;
1015 idx = params->es_cnt;
/* Record the metadata protocol ID with the flags value as the offset */
1017 params->es[idx].prot_id = ICE_PROT_META_ID;
1018 params->es[idx].off = flags;
1025 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1026 * @hw: pointer to the HW struct
1027 * @params: information about the flow to be processed
1028 * @seg: packet segment index of the field to be extracted
1029 * @fld: ID of field to be extracted
1030 * @match: bitfield of all fields
1032 * This function determines the protocol ID, offset, and size of the given
1033 * field. It then allocates one or more extraction sequence entries for the
1034 * given field, and fill the entries with protocol ID and offset information.
1036 static enum ice_status
1037 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1038 u8 seg, enum ice_flow_field fld, u64 match)
/* sib = optional "sibling" field that shares this field's extraction word */
1040 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1041 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1042 u8 fv_words = hw->blk[params->blk].es.fvw;
1043 struct ice_flow_fld_info *flds;
1044 u16 cnt, ese_bits, i;
1049 flds = params->prof->segs[seg].fields;
/* Map the flow field to a HW protocol ID; outer (seg 0) vs. inner
 * segments generally use different protocol IDs.
 */
1052 case ICE_FLOW_FIELD_IDX_ETH_DA:
1053 case ICE_FLOW_FIELD_IDX_ETH_SA:
1054 case ICE_FLOW_FIELD_IDX_S_VLAN:
1055 case ICE_FLOW_FIELD_IDX_C_VLAN:
1056 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1058 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1059 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1061 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1062 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1064 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1065 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1067 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1068 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1069 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1071 /* TTL and PROT share the same extraction seq. entry.
1072 * Each is considered a sibling to the other in terms of sharing
1073 * the same extraction sequence entry.
1075 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1076 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1077 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
1078 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1080 /* If the sibling field is also included, that field's
1081 * mask needs to be included.
1083 if (match & BIT(sib))
1084 sib_mask = ice_flds_info[sib].mask;
1086 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1087 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1088 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1090 /* TTL and PROT share the same extraction seq. entry.
1091 * Each is considered a sibling to the other in terms of sharing
1092 * the same extraction sequence entry.
1094 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1095 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1096 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
1097 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1099 /* If the sibling field is also included, that field's
1100 * mask needs to be included.
1102 if (match & BIT(sib))
1103 sib_mask = ice_flds_info[sib].mask;
1105 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1106 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1107 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1109 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1110 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1111 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1112 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1113 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1114 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1115 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1116 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1117 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1119 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1120 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1121 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1122 prot_id = ICE_PROT_TCP_IL;
1124 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1125 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1126 prot_id = ICE_PROT_UDP_IL_OR_S;
1128 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1129 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1130 prot_id = ICE_PROT_SCTP_IL;
1132 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1133 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1134 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1135 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1136 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1137 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1138 /* GTP is accessed through UDP OF protocol */
1139 prot_id = ICE_PROT_UDP_OF;
1141 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1142 prot_id = ICE_PROT_PPPOE;
1144 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1145 prot_id = ICE_PROT_UDP_IL_OR_S;
1147 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1148 prot_id = ICE_PROT_L2TPV3;
1150 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1151 prot_id = ICE_PROT_ESP_F;
1153 case ICE_FLOW_FIELD_IDX_AH_SPI:
1154 prot_id = ICE_PROT_ESP_2;
1156 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1157 prot_id = ICE_PROT_UDP_IL_OR_S;
1159 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1160 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1161 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1162 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1163 case ICE_FLOW_FIELD_IDX_ARP_OP:
1164 prot_id = ICE_PROT_ARP_OF;
1166 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1167 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1168 /* ICMP type and code share the same extraction seq. entry */
1169 prot_id = (params->prof->segs[seg].hdrs &
1170 ICE_FLOW_SEG_HDR_IPV4) ?
1171 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1172 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1173 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1174 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1176 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1177 prot_id = ICE_PROT_GRE_OF;
/* Unknown field ID */
1180 return ICE_ERR_NOT_IMPL;
1183 /* Each extraction sequence entry is a word in size, and extracts a
1184 * word-aligned offset from a protocol header.
1186 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record word-aligned byte offset, bit displacement within that word,
 * starting entry index, and match mask for this field.
 */
1188 flds[fld].xtrct.prot_id = prot_id;
1189 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1190 ICE_FLOW_FV_EXTRACT_SZ;
1191 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1192 flds[fld].xtrct.idx = params->es_cnt;
1193 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1195 /* Adjust the next field-entry index after accommodating the number of
1196 * entries this field consumes
1198 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1199 ice_flds_info[fld].size, ese_bits);
1201 /* Fill in the extraction sequence entries needed for this field */
1202 off = flds[fld].xtrct.off;
1203 mask = flds[fld].xtrct.mask;
1204 for (i = 0; i < cnt; i++) {
1205 /* Only consume an extraction sequence entry if there is no
1206 * sibling field associated with this field or the sibling entry
1207 * already extracts the word shared with this field.
1209 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1210 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1211 flds[sib].xtrct.off != off) {
1214 /* Make sure the number of extraction sequence required
1215 * does not exceed the block's capability
1217 if (params->es_cnt >= fv_words)
1218 return ICE_ERR_MAX_LIMIT;
1220 /* some blocks require a reversed field vector layout */
1221 if (hw->blk[params->blk].es.reverse)
1222 idx = fv_words - params->es_cnt - 1;
1224 idx = params->es_cnt;
1226 params->es[idx].prot_id = prot_id;
1227 params->es[idx].off = off;
/* Combine this field's mask with the sibling's, if any */
1228 params->mask[idx] = mask | sib_mask;
/* Advance to the next extraction word of this field */
1232 off += ICE_FLOW_FV_EXTRACT_SZ;
1239 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1240 * @hw: pointer to the HW struct
1241 * @params: information about the flow to be processed
1242 * @seg: index of packet segment whose raw fields are to be be extracted
1244 static enum ice_status
1245 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* Nothing to do when the segment has no raw-byte match fields */
1252 if (!params->prof->segs[seg].raws_cnt)
1255 if (params->prof->segs[seg].raws_cnt >
1256 ARRAY_SIZE(params->prof->segs[seg].raws))
1257 return ICE_ERR_MAX_LIMIT;
1259 /* Offsets within the segment headers are not supported */
1260 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1262 return ICE_ERR_PARAM;
1264 fv_words = hw->blk[params->blk].es.fvw;
1266 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1267 struct ice_flow_seg_fld_raw *raw;
1270 raw = &params->prof->segs[seg].raws[i];
1272 /* Storing extraction information */
/* Raw bytes are always extracted relative to the outer MAC header */
1273 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1274 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1275 ICE_FLOW_FV_EXTRACT_SZ;
1276 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1278 raw->info.xtrct.idx = params->es_cnt;
1280 /* Determine the number of field vector entries this raw field
1283 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1284 (raw->info.src.last * BITS_PER_BYTE),
1285 (ICE_FLOW_FV_EXTRACT_SZ *
1287 off = raw->info.xtrct.off;
1288 for (j = 0; j < cnt; j++) {
1291 /* Make sure the number of extraction sequence required
1292 * does not exceed the block's capability
1294 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1295 params->es_cnt >= ICE_MAX_FV_WORDS)
1296 return ICE_ERR_MAX_LIMIT;
1298 /* some blocks require a reversed field vector layout */
1299 if (hw->blk[params->blk].es.reverse)
1300 idx = fv_words - params->es_cnt - 1;
1302 idx = params->es_cnt;
1304 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1305 params->es[idx].off = off;
1307 off += ICE_FLOW_FV_EXTRACT_SZ;
1315 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1316 * @hw: pointer to the HW struct
1317 * @params: information about the flow to be processed
1319 * This function iterates through all matched fields in the given segments, and
1320 * creates an extraction sequence for the fields.
1322 static enum ice_status
1323 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1324 struct ice_flow_prof_params *params)
1326 enum ice_status status = ICE_SUCCESS;
1329 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1332 if (params->blk == ICE_BLK_ACL) {
1333 status = ice_flow_xtract_pkt_flags(hw, params,
1334 ICE_RX_MDID_PKT_FLAGS_15_0);
/* Walk each segment's match bitmap; stop early once all set bits in
 * "match" have been processed.
 */
1339 for (i = 0; i < params->prof->segs_cnt; i++) {
1340 u64 match = params->prof->segs[i].match;
1341 enum ice_flow_field j;
1343 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1344 const u64 bit = BIT_ULL(j);
1347 status = ice_flow_xtract_fld(hw, params, i, j,
1355 /* Process raw matching bytes */
1356 status = ice_flow_xtract_raws(hw, params, i);
1365 * ice_flow_sel_acl_scen - returns the specific scenario
1366 * @hw: pointer to the hardware structure
1367 * @params: information about the flow to be processed
1369 * This function will return the specific scenario based on the
1370 * params passed to it
1372 static enum ice_status
1373 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1375 /* Find the best-fit scenario for the provided match width */
1376 struct ice_acl_scen *cand_scen = NULL, *scen;
/* No ACL table allocated */
1379 return ICE_ERR_DOES_NOT_EXIST;
1381 /* Loop through each scenario and match against the scenario width
1382 * to select the specific scenario
1384 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
/* Pick the narrowest scenario wide enough for the entry */
1385 if (scen->eff_width >= params->entry_length &&
1386 (!cand_scen || cand_scen->eff_width > scen->eff_width))
/* No scenario can hold an entry of this length */
1389 return ICE_ERR_DOES_NOT_EXIST;
1391 params->prof->cfg.scen = cand_scen;
1397 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1398 * @params: information about the flow to be processed
/* Assigns a byte-selection index ("val") and byte length ("last") to every
 * matched field and raw field; range-check fields consume a range-checker
 * slot instead of byte-selection bytes.
 */
1400 static enum ice_status
1401 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1403 u16 index, i, range_idx = 0;
1405 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1407 for (i = 0; i < params->prof->segs_cnt; i++) {
1408 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1409 u64 match = seg->match;
1412 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1413 struct ice_flow_fld_info *fld;
1414 const u64 bit = BIT_ULL(j);
1419 fld = &seg->fields[j];
1420 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1422 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1423 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1425 /* Range checking only supported for single
1428 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1430 BITS_PER_BYTE * 2) > 1)
1431 return ICE_ERR_PARAM;
1433 /* Ranges must define low and high values */
1434 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1435 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1436 return ICE_ERR_PARAM;
/* Range fields store a range-checker slot number */
1438 fld->entry.val = range_idx++;
1440 /* Store adjusted byte-length of field for later
1441 * use, taking into account potential
1442 * non-byte-aligned displacement
1444 fld->entry.last = DIVIDE_AND_ROUND_UP
1445 (ice_flds_info[j].size +
1446 (fld->xtrct.disp % BITS_PER_BYTE),
1448 fld->entry.val = index;
1449 index += fld->entry.last;
/* Raw fields are laid out after the matched fields */
1455 for (j = 0; j < seg->raws_cnt; j++) {
1456 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1458 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1459 raw->info.entry.val = index;
1460 raw->info.entry.last = raw->info.src.last;
1461 index += raw->info.entry.last;
1465 /* Currently only support using the byte selection base, which only
1466 * allows for an effective entry size of 30 bytes. Reject anything
1469 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1470 return ICE_ERR_PARAM;
1472 /* Only 8 range checkers per profile, reject anything trying to use
1475 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1476 return ICE_ERR_PARAM;
1478 /* Store # bytes required for entry for later use */
1479 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1485 * ice_flow_proc_segs - process all packet segments associated with a profile
1486 * @hw: pointer to the HW struct
1487 * @params: information about the flow to be processed
/* Pipeline: resolve ptypes from headers, build the extraction sequence,
 * then apply per-block post-processing (ACL needs entry format + scenario).
 */
1489 static enum ice_status
1490 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1492 enum ice_status status;
1494 status = ice_flow_proc_seg_hdrs(params);
1498 status = ice_flow_create_xtrct_seq(hw, params);
1502 switch (params->blk) {
1505 status = ICE_SUCCESS;
/* ACL block: derive the entry layout, then pick a matching scenario */
1508 status = ice_flow_acl_def_entry_frmt(params);
1511 status = ice_flow_sel_acl_scen(hw, params);
/* Unsupported block type */
1516 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds() */
1522 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1523 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1524 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1527 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1528 * @hw: pointer to the HW struct
1529 * @blk: classification stage
1530 * @dir: flow direction
1531 * @segs: array of one or more packet segments that describe the flow
1532 * @segs_cnt: number of packet segments provided
1533 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1534 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
/* Returns the first matching profile, or NULL; the profile-list lock is
 * held only for the duration of the search.
 */
1536 static struct ice_flow_prof *
1537 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1538 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1539 u8 segs_cnt, u16 vsi_handle, u32 conds)
1541 struct ice_flow_prof *p, *prof = NULL;
1543 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1544 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
/* Direction must match unless NOT_CHK_DIR is set; segment counts
 * must be equal and non-zero.
 */
1545 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1546 segs_cnt && segs_cnt == p->segs_cnt) {
1549 /* Check for profile-VSI association if specified */
1550 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1551 ice_is_vsi_valid(hw, vsi_handle) &&
1552 !ice_is_bit_set(p->vsis, vsi_handle))
1555 /* Protocol headers must be checked. Matched fields are
1556 * checked if specified.
1558 for (i = 0; i < segs_cnt; i++)
1559 if (segs[i].hdrs != p->segs[i].hdrs ||
1560 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1561 segs[i].match != p->segs[i].match))
1564 /* A match is found if all segments are matched */
1565 if (i == segs_cnt) {
1570 ice_release_lock(&hw->fl_profs_locks[blk]);
1576 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1577 * @hw: pointer to the HW struct
1578 * @blk: classification stage
1579 * @dir: flow direction
1580 * @segs: array of one or more packet segments that describe the flow
1581 * @segs_cnt: number of packet segments provided
/* Convenience wrapper: checks headers AND matched fields, no VSI check.
 * Returns the profile ID, or ICE_FLOW_PROF_ID_INVAL when not found.
 */
1584 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1585 struct ice_flow_seg_info *segs, u8 segs_cnt)
1587 struct ice_flow_prof *p;
1589 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1590 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1592 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1596 * ice_flow_find_prof_id - Look up a profile with given profile ID
1597 * @hw: pointer to the HW struct
1598 * @blk: classification stage
1599 * @prof_id: unique ID to identify this flow profile
/* Linear scan of the block's profile list; NOTE(review): callers appear to
 * be responsible for holding the profile-list lock — not visible here.
 */
1601 static struct ice_flow_prof *
1602 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1604 struct ice_flow_prof *p;
1606 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1607 if (p->id == prof_id)
1614 * ice_dealloc_flow_entry - Deallocate flow entry memory
1615 * @hw: pointer to the HW struct
1616 * @entry: flow entry to be removed
/* Frees the entry's owned buffers (entry data, range buffer, actions) and
 * finally the entry itself. Pointers are cleared where the struct outlives
 * the freed member.
 */
1619 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1625 ice_free(hw, entry->entry);
1627 if (entry->range_buf) {
1628 ice_free(hw, entry->range_buf);
1629 entry->range_buf = NULL;
1633 ice_free(hw, entry->acts);
1635 entry->acts_cnt = 0;
1638 ice_free(hw, entry);
/* Sentinel scenario number (63) meaning "no scenario assigned" for a PF */
1641 #define ICE_ACL_INVALID_SCEN 0x3f
1644 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1645 * @hw: pointer to the hardware structure
1646 * @prof: pointer to flow profile
1647 * @buf: destination buffer function writes partial extraction sequence to
1649 * returns ICE_SUCCESS if no PF is associated to the given profile
1650 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1651 * returns other error code for real error
1653 static enum ice_status
1654 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1655 struct ice_aqc_acl_prof_generic_frmt *buf)
1657 enum ice_status status;
/* Map the SW profile ID to the HW profile index, then query its format */
1660 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1664 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1668 /* If all PF's associated scenarios are all 0 or all
1669 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1670 * not been configured yet.
1672 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1673 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1674 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1675 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1678 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1679 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1680 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1681 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1682 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1683 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1684 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1685 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* At least one PF slot holds a live scenario number */
1688 return ICE_ERR_IN_USE;
1692 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1693 * @hw: pointer to the hardware structure
1694 * @acts: array of actions to be performed on a match
1695 * @acts_cnt: number of actions
/* Releases any HW ACL counters referenced by counter-type actions;
 * non-counter actions are skipped.
 */
1697 static enum ice_status
1698 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1703 for (i = 0; i < acts_cnt; i++) {
1704 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1705 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1706 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1707 struct ice_acl_cntrs cntrs;
1708 enum ice_status status;
1710 cntrs.bank = 0; /* Only bank0 for the moment */
1712 LE16_TO_CPU(acts[i].data.acl_act.value);
1714 LE16_TO_CPU(acts[i].data.acl_act.value);
/* Dual counter for packet+byte counting, single otherwise */
1716 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1717 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1719 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1721 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1730 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1731 * @hw: pointer to the hardware structure
1732 * @prof: pointer to flow profile
1734 * Disassociate the scenario from the profile for the PF of the VSI.
1736 static enum ice_status
1737 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1739 struct ice_aqc_acl_prof_generic_frmt buf;
1740 enum ice_status status = ICE_SUCCESS;
1743 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
/* Read-modify-write of the profile's generic format: fetch current
 * state, mark this PF's scenario invalid, program it back.
 */
1745 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1749 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1753 /* Clear scenario for this PF */
1754 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1755 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1761 * ice_flow_rem_entry_sync - Remove a flow entry
1762 * @hw: pointer to the HW struct
1763 * @blk: classification stage
1764 * @entry: flow entry to be removed
/* Removes the entry from HW (ACL only), releases any ACL counters it holds,
 * unlinks it from the profile's entry list, and frees its memory.
 * NOTE(review): caller is presumably expected to hold the profile's
 * entries lock — not visible in this excerpt.
 */
1766 static enum ice_status
1767 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1768 struct ice_flow_entry *entry)
1771 return ICE_ERR_BAD_PTR;
1773 if (blk == ICE_BLK_ACL) {
1774 enum ice_status status;
1777 return ICE_ERR_BAD_PTR;
1779 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1780 entry->scen_entry_idx);
1784 /* Checks if we need to release an ACL counter. */
1785 if (entry->acts_cnt && entry->acts)
1786 ice_flow_acl_free_act_cntr(hw, entry->acts,
1790 LIST_DEL(&entry->l_entry);
1792 ice_dealloc_flow_entry(hw, entry);
1798 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1799 * @hw: pointer to the HW struct
1800 * @blk: classification stage
1801 * @dir: flow direction
1802 * @prof_id: unique ID to identify this flow profile
1803 * @segs: array of one or more packet segments that describe the flow
1804 * @segs_cnt: number of packet segments provided
1805 * @acts: array of default actions
1806 * @acts_cnt: number of default actions
1807 * @prof: stores the returned flow profile added
1809 * Assumption: the caller has acquired the lock to the profile list
1811 static enum ice_status
1812 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1813 enum ice_flow_dir dir, u64 prof_id,
1814 struct ice_flow_seg_info *segs, u8 segs_cnt,
1815 struct ice_flow_action *acts, u8 acts_cnt,
1816 struct ice_flow_prof **prof)
1818 struct ice_flow_prof_params *params;
1819 enum ice_status status;
/* acts may be NULL only when acts_cnt is 0 */
1822 if (!prof || (acts_cnt && !acts))
1823 return ICE_ERR_BAD_PTR;
1825 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1827 return ICE_ERR_NO_MEMORY;
1829 params->prof = (struct ice_flow_prof *)
1830 ice_malloc(hw, sizeof(*params->prof));
1831 if (!params->prof) {
1832 status = ICE_ERR_NO_MEMORY;
1836 /* initialize extraction sequence to all invalid (0xff) */
1837 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1838 params->es[i].prot_id = ICE_PROT_INVALID;
1839 params->es[i].off = ICE_FV_OFFSET_INVAL;
1843 params->prof->id = prof_id;
1844 params->prof->dir = dir;
1845 params->prof->segs_cnt = segs_cnt;
1847 /* Make a copy of the segments that need to be persistent in the flow
1850 for (i = 0; i < segs_cnt; i++)
1851 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
1852 ICE_NONDMA_TO_NONDMA);
1854 /* Make a copy of the actions that need to be persistent in the flow
1858 params->prof->acts = (struct ice_flow_action *)
1859 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1860 ICE_NONDMA_TO_NONDMA);
1862 if (!params->prof->acts) {
1863 status = ICE_ERR_NO_MEMORY;
/* Build the extraction sequence and ptype set for all segments */
1868 status = ice_flow_proc_segs(hw, params);
1870 ice_debug(hw, ICE_DBG_FLOW,
1871 "Error processing a flow's packet segments\n");
1875 /* Add a HW profile for this flow profile */
1876 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1877 params->attr, params->attr_cnt, params->es,
1880 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1884 INIT_LIST_HEAD(&params->prof->entries);
1885 ice_init_lock(&params->prof->entries_lock);
/* Success: hand ownership of the new profile to the caller */
1886 *prof = params->prof;
/* Error path: release the partially-built profile and its actions */
1890 if (params->prof->acts)
1891 ice_free(hw, params->prof->acts);
1892 ice_free(hw, params->prof);
/* params is scratch space only; always freed */
1895 ice_free(hw, params);
/* NOTE(review): embedded numbering is non-contiguous — error-handling lines
 * (goto/brace/early-return) were elided by extraction; verify upstream.
 */
1901 * ice_flow_rem_prof_sync - remove a flow profile
1902 * @hw: pointer to the hardware structure
1903 * @blk: classification stage
1904 * @prof: pointer to flow profile to remove
1906 * Assumption: the caller has acquired the lock to the profile list
1908 static enum ice_status
1909 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1910 struct ice_flow_prof *prof)
1912 enum ice_status status;
1914 /* Remove all remaining flow entries before removing the flow profile */
1915 if (!LIST_EMPTY(&prof->entries)) {
1916 struct ice_flow_entry *e, *t;
1918 ice_acquire_lock(&prof->entries_lock);
/* Safe iteration: entries are deleted from the list while walking it */
1920 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1922 status = ice_flow_rem_entry_sync(hw, blk, e);
1927 ice_release_lock(&prof->entries_lock);
/* ACL profiles carry extra HW state (scenario + range checkers) to tear down */
1930 if (blk == ICE_BLK_ACL) {
1931 struct ice_aqc_acl_profile_ranges query_rng_buf;
1932 struct ice_aqc_acl_prof_generic_frmt buf;
1935 /* Disassociate the scenario from the profile for the PF */
1936 status = ice_flow_acl_disassoc_scen(hw, prof);
1940 /* Clear the range-checker if the profile ID is no longer
1943 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1944 if (status && status != ICE_ERR_IN_USE) {
1946 } else if (!status) {
1947 /* Clear the range-checker value for profile ID */
1948 ice_memset(&query_rng_buf, 0,
1949 sizeof(struct ice_aqc_acl_profile_ranges),
1952 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1957 status = ice_prog_acl_prof_ranges(hw, prof_id,
1958 &query_rng_buf, NULL);
1964 /* Remove all hardware profiles associated with this flow profile */
1965 status = ice_rem_prof(hw, blk, prof->id);
/* Unlink and free the SW copy of the profile on success */
1967 LIST_DEL(&prof->l_entry);
1968 ice_destroy_lock(&prof->entries_lock);
1970 ice_free(hw, prof->acts);
/* NOTE(review): local declarations and braces were elided by extraction
 * (numbering jumps 1984 -> 1989); verify upstream before editing.
 */
1978 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1979 * @buf: Destination buffer function writes partial xtrct sequence to
1980 * @info: Info about field
1983 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1984 struct ice_flow_fld_info *info)
/* Source byte index = FV word index scaled to bytes plus the byte part of
 * the bit displacement; destination starts at the field's entry value.
 */
1989 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1990 info->xtrct.disp / BITS_PER_BYTE;
1991 dst = info->entry.val;
1992 for (i = 0; i < info->entry.last; i++)
1993 /* HW stores field vector words in LE, convert words back to BE
1994 * so constructed entries will end up in network order
/* XOR with 1 swaps the two bytes within each 16-bit FV word */
1996 buf->byte_selection[dst++] = src++ ^ 1;
/* NOTE(review): several lines (early returns, braces, word_selection RHS)
 * elided by extraction — numbering is non-contiguous; verify upstream.
 */
2000 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2001 * @hw: pointer to the hardware structure
2002 * @prof: pointer to flow profile
2004 static enum ice_status
2005 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2007 struct ice_aqc_acl_prof_generic_frmt buf;
2008 struct ice_flow_fld_info *info;
2009 enum ice_status status;
2013 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2015 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* ICE_ERR_IN_USE is not fatal here — another PF may already use the profile */
2019 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2020 if (status && status != ICE_ERR_IN_USE)
2024 /* Program the profile dependent configuration. This is done
2025 * only once regardless of the number of PFs using that profile
2027 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2029 for (i = 0; i < prof->segs_cnt; i++) {
2030 struct ice_flow_seg_info *seg = &prof->segs[i];
2031 u64 match = seg->match;
/* Walk every matched field bit of this segment; "match" drains as a bound */
2034 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2035 const u64 bit = BIT_ULL(j);
2040 info = &seg->fields[j];
/* Range-checked fields go through word selection; others through bytes */
2042 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2043 buf.word_selection[info->entry.val] =
2046 ice_flow_acl_set_xtrct_seq_fld(&buf,
/* Raw (user-defined) match bytes are also added to the sequence */
2052 for (j = 0; j < seg->raws_cnt; j++) {
2053 info = &seg->raws[j].info;
2054 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Mark all PF scenario slots invalid, then fill in only the current PF */
2058 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2059 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2063 /* Update the current PF */
2064 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2065 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
/* NOTE(review): return type line, braces, and the trailing "vsig" argument /
 * return statement were elided by extraction; verify upstream.
 */
2071 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2072 * @hw: pointer to the hardware structure
2073 * @blk: classification stage
2074 * @vsi_handle: software VSI handle
2075 * @vsig: target VSI group
2077 * Assumption: the caller has already verified that the VSI to
2078 * be added has the same characteristics as the VSIG and will
2079 * thereby have access to all resources added to that VSIG.
2082 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2085 enum ice_status status;
2087 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2088 return ICE_ERR_PARAM;
/* Profile-list lock protects the VSIG association for this block */
2090 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2091 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2093 ice_release_lock(&hw->fl_profs_locks[blk]);
/* NOTE(review): braces and some argument/return lines were elided by
 * extraction (numbering jumps); verify upstream before editing.
 */
2099 * ice_flow_assoc_prof - associate a VSI with a flow profile
2100 * @hw: pointer to the hardware structure
2101 * @blk: classification stage
2102 * @prof: pointer to flow profile
2103 * @vsi_handle: software VSI handle
2105 * Assumption: the caller has acquired the lock to the profile list
2106 * and the software VSI handle has been validated
2108 static enum ice_status
2109 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2110 struct ice_flow_prof *prof, u16 vsi_handle)
2112 enum ice_status status = ICE_SUCCESS;
/* No-op if the VSI is already associated with this profile */
2114 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
/* ACL needs its extraction sequence programmed before the HW add */
2115 if (blk == ICE_BLK_ACL) {
2116 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2120 status = ice_add_prof_id_flow(hw, blk,
2121 ice_get_hw_vsi_num(hw,
/* Track the association in the SW bitmap only on success */
2125 ice_set_bit(vsi_handle, prof->vsis);
2127 ice_debug(hw, ICE_DBG_FLOW,
2128 "HW profile add failed, %d\n",
/* NOTE(review): mirror of ice_flow_assoc_prof; braces/return lines elided
 * by extraction — verify upstream.
 */
2136 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2137 * @hw: pointer to the hardware structure
2138 * @blk: classification stage
2139 * @prof: pointer to flow profile
2140 * @vsi_handle: software VSI handle
2142 * Assumption: the caller has acquired the lock to the profile list
2143 * and the software VSI handle has been validated
2145 static enum ice_status
2146 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2147 struct ice_flow_prof *prof, u16 vsi_handle)
2149 enum ice_status status = ICE_SUCCESS;
/* Only act when the VSI is actually associated with this profile */
2151 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2152 status = ice_rem_prof_id_flow(hw, blk,
2153 ice_get_hw_vsi_num(hw,
/* Clear the SW association bit only after the HW removal succeeds */
2157 ice_clear_bit(vsi_handle, prof->vsis);
2159 ice_debug(hw, ICE_DBG_FLOW,
2160 "HW profile remove failed, %d\n",
/* NOTE(review): the parameter-validation conditions for the two early
 * returns at 2191/2194 were elided by extraction; verify upstream.
 */
2168 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2169 * @hw: pointer to the HW struct
2170 * @blk: classification stage
2171 * @dir: flow direction
2172 * @prof_id: unique ID to identify this flow profile
2173 * @segs: array of one or more packet segments that describe the flow
2174 * @segs_cnt: number of packet segments provided
2175 * @acts: array of default actions
2176 * @acts_cnt: number of default actions
2177 * @prof: stores the returned flow profile added
2180 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2181 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2182 struct ice_flow_action *acts, u8 acts_cnt,
2183 struct ice_flow_prof **prof)
2185 enum ice_status status;
2187 if (segs_cnt > ICE_FLOW_SEG_MAX)
2188 return ICE_ERR_MAX_LIMIT;
2191 return ICE_ERR_PARAM;
2194 return ICE_ERR_BAD_PTR;
/* Validate the segments' protocol-header layering before committing */
2196 status = ice_flow_val_hdrs(segs, segs_cnt);
2200 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2202 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2203 acts, acts_cnt, prof);
/* On success, publish the new profile on the per-block profile list */
2205 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2207 ice_release_lock(&hw->fl_profs_locks[blk]);
2213 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2214 * @hw: pointer to the HW struct
2215 * @blk: the block for which the flow profile is to be removed
2216 * @prof_id: unique ID of the flow profile to be removed
2219 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2221 struct ice_flow_prof *prof;
2222 enum ice_status status;
/* Hold the per-block profile lock across lookup and removal */
2224 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2226 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2228 status = ICE_ERR_DOES_NOT_EXIST;
2232 /* prof becomes invalid after the call */
2233 status = ice_flow_rem_prof_sync(hw, blk, prof);
2236 ice_release_lock(&hw->fl_profs_locks[blk]);
2242 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2243 * @hw: pointer to the HW struct
2244 * @blk: classification stage
2245 * @prof_id: the profile ID handle
2246 * @hw_prof_id: pointer to variable to receive the HW profile ID
2249 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
/* Default to "not found"; overwritten only when the map lookup succeeds */
2252 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2253 struct ice_prof_map *map;
2255 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2256 map = ice_search_prof_id(hw, blk, prof_id);
2258 *hw_prof_id = map->prof_id;
2259 status = ICE_SUCCESS;
2261 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* NOTE(review): loop braces and the "found" break-out lines were elided by
 * extraction; verify upstream before editing.
 */
2266 * ice_flow_find_entry - look for a flow entry using its unique ID
2267 * @hw: pointer to the HW struct
2268 * @blk: classification stage
2269 * @entry_id: unique ID to identify this flow entry
2271 * This function looks for the flow entry with the specified unique ID in all
2272 * flow profiles of the specified classification stage. If the entry is found,
2273 * and it returns the handle to the flow entry. Otherwise, it returns
2274 * ICE_FLOW_ENTRY_ID_INVAL.
2276 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2278 struct ice_flow_entry *found = NULL;
2279 struct ice_flow_prof *p;
2281 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Scan every profile's entry list for the matching entry ID */
2283 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2284 struct ice_flow_entry *e;
2286 ice_acquire_lock(&p->entries_lock);
2287 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2288 if (e->id == entry_id) {
2292 ice_release_lock(&p->entries_lock);
2298 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Return a handle (opaque encoding of the entry pointer) or the invalid value */
2300 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
/* NOTE(review): error-return lines and braces were elided by extraction;
 * verify upstream before editing.
 */
2304 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2305 * @hw: pointer to the hardware structure
2306 * @acts: array of actions to be performed on a match
2307 * @acts_cnt: number of actions
2308 * @cnt_alloc: indicates if an ACL counter has been allocated.
2310 static enum ice_status
2311 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2312 u8 acts_cnt, bool *cnt_alloc)
/* Bitmap used to reject duplicate action types within one rule */
2314 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2317 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2320 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2321 return ICE_ERR_OUT_OF_RANGE;
/* First pass: only NOP/DROP/CNTR_PKT/FWD_QUEUE are supported action types */
2323 for (i = 0; i < acts_cnt; i++) {
2324 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2325 acts[i].type != ICE_FLOW_ACT_DROP &&
2326 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2327 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2330 /* If the caller want to add two actions of the same type, then
2331 * it is considered invalid configuration.
2333 if (ice_test_and_set_bit(acts[i].type, dup_check))
2334 return ICE_ERR_PARAM;
2337 /* Checks if ACL counters are needed. */
2338 for (i = 0; i < acts_cnt; i++) {
2339 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2340 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2341 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2342 struct ice_acl_cntrs cntrs;
2343 enum ice_status status;
2346 cntrs.bank = 0; /* Only bank0 for the moment */
/* Dual counter when both packets and bytes are counted together */
2348 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2349 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2351 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2353 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2356 /* Counter index within the bank */
2357 acts[i].data.acl_act.value =
2358 CPU_TO_LE16(cntrs.first_cntr);
/* NOTE(review): the "if (new_mask)" guard and local declarations were elided
 * by extraction (numbering jumps 2387 -> 2391); verify upstream.
 */
2367 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2368 * @fld: number of the given field
2369 * @info: info about field
2370 * @range_buf: range checker configuration buffer
2371 * @data: pointer to a data buffer containing flow entry's match values/masks
2372 * @range: Input/output param indicating which range checkers are being used
2375 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2376 struct ice_aqc_acl_profile_ranges *range_buf,
2377 u8 *data, u8 *range)
2381 /* If not specified, default mask is all bits in field */
2382 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2383 BIT(ice_flds_info[fld].size) - 1 :
2384 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2386 /* If the mask is 0, then we don't need to worry about this input
2387 * range checker value.
/* high/low boundaries come from the entry's "last" and "val" offsets */
2391 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2393 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2394 u8 range_idx = info->entry.val;
/* Range-checker registers are big-endian in HW */
2396 range_buf->checker_cfg[range_idx].low_boundary =
2397 CPU_TO_BE16(new_low);
2398 range_buf->checker_cfg[range_idx].high_boundary =
2399 CPU_TO_BE16(new_high);
2400 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2402 /* Indicate which range checker is being used */
2403 *range |= BIT(range_idx);
/* NOTE(review): several guard lines ("if (disp)", "if (end_disp)", braces)
 * were elided by extraction; verify upstream before editing.
 */
2408 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2409 * @fld: number of the given field
2410 * @info: info about the field
2411 * @buf: buffer containing the entry
2412 * @dontcare: buffer containing don't care mask for entry
2413 * @data: pointer to a data buffer containing flow entry's match values/masks
2416 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2417 u8 *dontcare, u8 *data)
2419 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2420 bool use_mask = false;
2423 src = info->src.val;
2424 mask = info->src.mask;
/* Destination index is relative to the ACL byte-selection window start */
2425 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* "disp" is the sub-byte bit displacement of the extracted field */
2426 disp = info->xtrct.disp % BITS_PER_BYTE;
2428 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2431 for (k = 0; k < info->entry.last; k++, dst++) {
2432 /* Add overflow bits from previous byte */
2433 buf[dst] = (tmp_s & 0xff00) >> 8;
2435 /* If mask is not valid, tmp_m is always zero, so just setting
2436 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2437 * overflow bits of mask from prev byte
2439 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2441 /* If there is displacement, last byte will only contain
2442 * displaced data, but there is no more data to read from user
2443 * buffer, so skip so as not to potentially read beyond end of
2446 if (!disp || k < info->entry.last - 1) {
2447 /* Store shifted data to use in next byte */
2448 tmp_s = data[src++] << disp;
2450 /* Add current (shifted) byte */
2451 buf[dst] |= tmp_s & 0xff;
2453 /* Handle mask if valid */
/* Invert the user mask: dontcare bits are the bits NOT matched */
2455 tmp_m = (~data[mask++] & 0xff) << disp;
2456 dontcare[dst] |= tmp_m & 0xff;
2461 /* Fill in don't care bits at beginning of field */
2463 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2464 for (k = 0; k < disp; k++)
2465 dontcare[dst] |= BIT(k);
/* end_disp: number of valid bits in the field's final byte */
2468 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2470 /* Fill in don't care bits at end of field */
2472 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2473 info->entry.last - 1;
2474 for (k = end_disp; k < BITS_PER_BYTE; k++)
2475 dontcare[dst] |= BIT(k);
/* NOTE(review): many allocation-failure checks, goto labels, and the
 * ice_set_key argument tail were elided by extraction (non-contiguous
 * numbering); verify upstream before editing.
 */
2480 * ice_flow_acl_frmt_entry - Format ACL entry
2481 * @hw: pointer to the hardware structure
2482 * @prof: pointer to flow profile
2483 * @e: pointer to the flow entry
2484 * @data: pointer to a data buffer containing flow entry's match values/masks
2485 * @acts: array of actions to be performed on a match
2486 * @acts_cnt: number of actions
2488 * Formats the key (and key_inverse) to be matched from the data passed in,
2489 * along with data from the flow profile. This key/key_inverse pair makes up
2490 * the 'entry' for an ACL flow entry.
2492 static enum ice_status
2493 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2494 struct ice_flow_entry *e, u8 *data,
2495 struct ice_flow_action *acts, u8 acts_cnt)
2497 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2498 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2499 enum ice_status status;
2504 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2508 /* Format the result action */
2510 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2514 status = ICE_ERR_NO_MEMORY;
/* Keep a persistent copy of the actions in the entry */
2516 e->acts = (struct ice_flow_action *)
2517 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2518 ICE_NONDMA_TO_NONDMA);
2523 e->acts_cnt = acts_cnt;
2525 /* Format the matching data */
2526 buf_sz = prof->cfg.scen->width;
2527 buf = (u8 *)ice_malloc(hw, buf_sz);
2531 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2535 /* 'key' buffer will store both key and key_inverse, so must be twice
2538 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2542 range_buf = (struct ice_aqc_acl_profile_ranges *)
2543 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2547 /* Set don't care mask to all 1's to start, will zero out used bytes */
2548 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2550 for (i = 0; i < prof->segs_cnt; i++) {
2551 struct ice_flow_seg_info *seg = &prof->segs[i];
2552 u64 match = seg->match;
/* Per-field formatting: range fields feed the range checkers, the rest
 * are packed into the key/dontcare byte buffers.
 */
2555 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2556 struct ice_flow_fld_info *info;
2557 const u64 bit = BIT_ULL(j);
2562 info = &seg->fields[j];
2564 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2565 ice_flow_acl_frmt_entry_range(j, info,
2569 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (user-defined) bytes are copied straight into the key buffer */
2575 for (j = 0; j < seg->raws_cnt; j++) {
2576 struct ice_flow_fld_info *info = &seg->raws[j].info;
2577 u16 dst, src, mask, k;
2578 bool use_mask = false;
2580 src = info->src.val;
2581 dst = info->entry.val -
2582 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2583 mask = info->src.mask;
2585 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2588 for (k = 0; k < info->entry.last; k++, dst++) {
2589 buf[dst] = data[src++];
2591 dontcare[dst] = ~data[mask++];
/* The profile ID byte is always matched exactly (dontcare = 0) */
2598 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2599 dontcare[prof->cfg.scen->pid_idx] = 0;
2601 /* Format the buffer for direction flags */
2602 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2604 if (prof->dir == ICE_FLOW_RX)
2605 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* Record which range checkers this entry uses; unused ones are dontcare */
2608 buf[prof->cfg.scen->rng_chk_idx] = range;
2609 /* Mark any unused range checkers as don't care */
2610 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2611 e->range_buf = range_buf;
2613 ice_free(hw, range_buf);
/* Build key + key_inverse from value/dontcare buffers (size is 2x width) */
2616 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2622 e->entry_sz = buf_sz * 2;
2629 ice_free(hw, dontcare);
/* Error cleanup: release range buffer, actions, and any allocated counters */
2634 if (status && range_buf) {
2635 ice_free(hw, range_buf);
2636 e->range_buf = NULL;
2639 if (status && e->acts) {
2640 ice_free(hw, e->acts);
2645 if (status && cnt_alloc)
2646 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
/* NOTE(review): "continue"/"break" lines and the return_entry assignment
 * were elided by extraction; verify upstream before editing.
 */
2652 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2653 * the compared data.
2654 * @prof: pointer to flow profile
2655 * @e: pointer to the comparing flow entry
2656 * @do_chg_action: decide if we want to change the ACL action
2657 * @do_add_entry: decide if we want to add the new ACL entry
2658 * @do_rem_entry: decide if we want to remove the current ACL entry
2660 * Find an ACL scenario entry that matches the compared data. In the same time,
2661 * this function also figure out:
2662 * a/ If we want to change the ACL action
2663 * b/ If we want to add the new ACL entry
2664 * c/ If we want to remove the current ACL entry
2666 static struct ice_flow_entry *
2667 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2668 struct ice_flow_entry *e, bool *do_chg_action,
2669 bool *do_add_entry, bool *do_rem_entry)
2671 struct ice_flow_entry *p, *return_entry = NULL;
2675 * a/ There exists an entry with same matching data, but different
2676 * priority, then we remove this existing ACL entry. Then, we
2677 * will add the new entry to the ACL scenario.
2678 * b/ There exists an entry with same matching data, priority, and
2679 * result action, then we do nothing
2680 * c/ There exists an entry with same matching data, priority, but
2681 * different, action, then do only change the action's entry.
2682 * d/ Else, we add this new entry to the ACL scenario.
/* Default decision: add the new entry, change/remove nothing */
2684 *do_chg_action = false;
2685 *do_add_entry = true;
2686 *do_rem_entry = false;
2687 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2688 if (memcmp(p->entry, e->entry, p->entry_sz))
2691 /* From this point, we have the same matching_data. */
2692 *do_add_entry = false;
2695 if (p->priority != e->priority) {
2696 /* matching data && !priority */
2697 *do_add_entry = true;
2698 *do_rem_entry = true;
2702 /* From this point, we will have matching_data && priority */
2703 if (p->acts_cnt != e->acts_cnt)
2704 *do_chg_action = true;
/* Any action of p not present in e means the action set differs */
2705 for (i = 0; i < p->acts_cnt; i++) {
2706 bool found_not_match = false;
2708 for (j = 0; j < e->acts_cnt; j++)
2709 if (memcmp(&p->acts[i], &e->acts[j],
2710 sizeof(struct ice_flow_action))) {
2711 found_not_match = true;
2715 if (found_not_match) {
2716 *do_chg_action = true;
2721 /* (do_chg_action = true) means :
2722 * matching_data && priority && !result_action
2723 * (do_chg_action = false) means :
2724 * matching_data && priority && result_action
2729 return return_entry;
/* NOTE(review): the switch statement line, break statements, and the final
 * return were elided by extraction; verify upstream.
 */
2733 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2736 static enum ice_acl_entry_prior
2737 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2739 enum ice_acl_entry_prior acl_prior;
/* Straight 1:1 mapping of flow priority to ACL entry priority */
2742 case ICE_FLOW_PRIO_LOW:
2743 acl_prior = ICE_LOW;
2745 case ICE_FLOW_PRIO_NORMAL:
2746 acl_prior = ICE_NORMAL;
2748 case ICE_FLOW_PRIO_HIGH:
2749 acl_prior = ICE_HIGH;
/* Default: treat unknown priorities as NORMAL */
2752 acl_prior = ICE_NORMAL;
/* NOTE(review): the "skip empty source slot" guard and break statements
 * were elided by extraction; verify upstream.
 */
2760 * ice_flow_acl_union_rng_chk - Perform union operation between two
2761 * range-range checker buffers
2762 * @dst_buf: pointer to destination range checker buffer
2763 * @src_buf: pointer to source range checker buffer
2765 * For this function, we do the union between dst_buf and src_buf
2766 * range checker buffer, and we will save the result back to dst_buf
2768 static enum ice_status
2769 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2770 struct ice_aqc_acl_profile_ranges *src_buf)
2774 if (!dst_buf || !src_buf)
2775 return ICE_ERR_BAD_PTR;
2777 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2778 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2779 bool will_populate = false;
2781 in_data = &src_buf->checker_cfg[i];
/* Find a destination slot: either empty (mask == 0) or already equal */
2786 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2787 cfg_data = &dst_buf->checker_cfg[j];
2789 if (!cfg_data->mask ||
2790 !memcmp(cfg_data, in_data,
2791 sizeof(struct ice_acl_rng_data))) {
2792 will_populate = true;
2797 if (will_populate) {
2798 ice_memcpy(cfg_data, in_data,
2799 sizeof(struct ice_acl_rng_data),
2800 ICE_NONDMA_TO_NONDMA);
2802 /* No available slot left to program range checker */
2803 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): numerous error checks, goto labels, and branch lines were
 * elided by extraction (non-contiguous numbering); verify upstream before
 * editing. Also note 2887 passes e->entry_sz as the ice_calloc element
 * count — presumably this should relate to the action count; confirm
 * against upstream.
 */
2811 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2812 * @hw: pointer to the hardware structure
2813 * @prof: pointer to flow profile
2814 * @entry: double pointer to the flow entry
2816 * For this function, we will look at the current added entries in the
2817 * corresponding ACL scenario. Then, we will perform matching logic to
2818 * see if we want to add/modify/do nothing with this new entry.
2820 static enum ice_status
2821 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2822 struct ice_flow_entry **entry)
2824 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2825 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2826 struct ice_acl_act_entry *acts = NULL;
2827 struct ice_flow_entry *exist;
2828 enum ice_status status = ICE_SUCCESS;
2829 struct ice_flow_entry *e;
2832 if (!entry || !(*entry) || !prof)
2833 return ICE_ERR_BAD_PTR;
2837 do_chg_rng_chk = false;
2841 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2846 /* Query the current range-checker value in FW */
2847 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2851 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2852 sizeof(struct ice_aqc_acl_profile_ranges),
2853 ICE_NONDMA_TO_NONDMA);
2855 /* Generate the new range-checker value */
2856 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2860 /* Reconfigure the range check if the buffer is changed. */
2861 do_chg_rng_chk = false;
2862 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2863 sizeof(struct ice_aqc_acl_profile_ranges))) {
2864 status = ice_prog_acl_prof_ranges(hw, prof_id,
2865 &cfg_rng_buf, NULL);
2869 do_chg_rng_chk = true;
2873 /* Figure out if we want to (change the ACL action) and/or
2874 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2876 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2877 &do_add_entry, &do_rem_entry);
/* Remove the stale duplicate (same data, different priority) first */
2880 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2885 /* Prepare the result action buffer */
2886 acts = (struct ice_acl_act_entry *)ice_calloc
2887 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry))
2888 for (i = 0; i < e->acts_cnt; i++)
2889 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2890 sizeof(struct ice_acl_act_entry),
2891 ICE_NONDMA_TO_NONDMA);
/* Path 1: brand-new entry — program key + inverse key into the scenario */
2894 enum ice_acl_entry_prior prior;
2898 keys = (u8 *)e->entry;
/* key buffer holds key then key_inverse; inverse starts at the midpoint */
2899 inverts = keys + (e->entry_sz / 2);
2900 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2902 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2903 inverts, acts, e->acts_cnt,
2908 e->scen_entry_idx = entry_idx;
2909 LIST_ADD(&e->l_entry, &prof->entries);
/* Path 2: same data and priority, different actions — update in place */
2911 if (do_chg_action) {
2912 /* For the action memory info, update the SW's copy of
2913 * exist entry with e's action memory info
2915 ice_free(hw, exist->acts);
2916 exist->acts_cnt = e->acts_cnt;
2917 exist->acts = (struct ice_flow_action *)
2918 ice_calloc(hw, exist->acts_cnt,
2919 sizeof(struct ice_flow_action));
2922 status = ICE_ERR_NO_MEMORY;
2926 ice_memcpy(exist->acts, e->acts,
2927 sizeof(struct ice_flow_action) * e->acts_cnt,
2928 ICE_NONDMA_TO_NONDMA);
2930 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2932 exist->scen_entry_idx);
2937 if (do_chg_rng_chk) {
2938 /* In this case, we want to update the range checker
2939 * information of the exist entry
2941 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2947 /* As we don't add the new entry to our SW DB, deallocate its
2948 * memories, and return the exist entry to the caller
2950 ice_dealloc_flow_entry(hw, e);
2961 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2962 * @hw: pointer to the hardware structure
2963 * @prof: pointer to flow profile
2964 * @e: double pointer to the flow entry
2966 static enum ice_status
2967 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2968 struct ice_flow_entry **e)
2970 enum ice_status status;
/* Thin locking wrapper around the _sync worker */
2972 ice_acquire_lock(&prof->entries_lock);
2973 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2974 ice_release_lock(&prof->entries_lock);
/* NOTE(review): the switch on blk, entry-field initialization (e->id,
 * e->prof, e->priority), goto labels, and several braces were elided by
 * extraction; verify upstream before editing.
 */
2980 * ice_flow_add_entry - Add a flow entry
2981 * @hw: pointer to the HW struct
2982 * @blk: classification stage
2983 * @prof_id: ID of the profile to add a new flow entry to
2984 * @entry_id: unique ID to identify this flow entry
2985 * @vsi_handle: software VSI handle for the flow entry
2986 * @prio: priority of the flow entry
2987 * @data: pointer to a data buffer containing flow entry's match values/masks
2988 * @acts: arrays of actions to be performed on a match
2989 * @acts_cnt: number of actions
2990 * @entry_h: pointer to buffer that receives the new flow entry's handle
2993 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2994 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2995 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2998 struct ice_flow_entry *e = NULL;
2999 struct ice_flow_prof *prof;
3000 enum ice_status status = ICE_SUCCESS;
3002 /* ACL entries must indicate an action */
3003 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3004 return ICE_ERR_PARAM;
3006 /* No flow entry data is expected for RSS */
3007 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3008 return ICE_ERR_BAD_PTR;
3010 if (!ice_is_vsi_valid(hw, vsi_handle))
3011 return ICE_ERR_PARAM;
3013 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3015 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3017 status = ICE_ERR_DOES_NOT_EXIST;
3019 /* Allocate memory for the entry being added and associate
3020 * the VSI to the found flow profile
3022 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3024 status = ICE_ERR_NO_MEMORY;
3026 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3029 ice_release_lock(&hw->fl_profs_locks[blk]);
3034 e->vsi_handle = vsi_handle;
3043 /* ACL will handle the entry management */
3044 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3049 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Unsupported block types fall through to NOT_IMPL */
3055 status = ICE_ERR_NOT_IMPL;
3059 if (blk != ICE_BLK_ACL) {
3060 /* ACL will handle the entry management */
3061 ice_acquire_lock(&prof->entries_lock);
3062 LIST_ADD(&e->l_entry, &prof->entries);
3063 ice_release_lock(&prof->entries_lock);
/* Hand the opaque entry handle back to the caller */
3066 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3071 ice_free(hw, e->entry);
/* NOTE(review): the "prof = entry->prof" line (3097/3098) and return were
 * elided by extraction; verify upstream.
 */
3079 * ice_flow_rem_entry - Remove a flow entry
3080 * @hw: pointer to the HW struct
3081 * @blk: classification stage
3082 * @entry_h: handle to the flow entry to be removed
3084 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3087 struct ice_flow_entry *entry;
3088 struct ice_flow_prof *prof;
3089 enum ice_status status = ICE_SUCCESS;
3091 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3092 return ICE_ERR_PARAM;
/* Decode the opaque handle back into an entry pointer */
3094 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h)
3096 /* Retain the pointer to the flow profile as the entry will be freed */
3100 ice_acquire_lock(&prof->entries_lock);
3101 status = ice_flow_rem_entry_sync(hw, blk, entry);
3102 ice_release_lock(&prof->entries_lock);
/* NOTE(review): the body following "if (field_type == ICE_FLOW_FLD_TYPE_RANGE)"
 * at 3139 was elided by extraction; verify upstream.
 */
3109 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3110 * @seg: packet segment the field being set belongs to
3111 * @fld: field to be set
3112 * @field_type: type of the field
3113 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3114 * entry's input buffer
3115 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3117 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3118 * entry's input buffer
3120 * This helper function stores information of a field being matched, including
3121 * the type of the field and the locations of the value to match, the mask, and
3122 * and the upper-bound value in the start of the input buffer for a flow entry.
3123 * This function should only be used for fixed-size data structures.
3125 * This function also opportunistically determines the protocol headers to be
3126 * present based on the fields being set. Some fields cannot be used alone to
3127 * determine the protocol headers present. Sometimes, fields for particular
3128 * protocol headers are not matched. In those cases, the protocol headers
3129 * must be explicitly set.
3132 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3133 enum ice_flow_fld_match_type field_type, u16 val_loc,
3134 u16 mask_loc, u16 last_loc)
3136 u64 bit = BIT_ULL(fld);
3139 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the match type and the value/mask/last byte offsets */
3142 seg->fields[fld].type = field_type;
3143 seg->fields[fld].src.val = val_loc;
3144 seg->fields[fld].src.mask = mask_loc;
3145 seg->fields[fld].src.last = last_loc;
/* Mark the protocol header this field implies on the segment */
3147 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3151 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3152 * @seg: packet segment the field being set belongs to
3153 * @fld: field to be set
3154 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3155 * entry's input buffer
3156 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3158 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3159 * entry's input buffer
3160 * @range: indicate if field being matched is to be in a range
3162 * This function specifies the locations, in the form of byte offsets from the
3163 * start of the input buffer for a flow entry, from where the value to match,
3164 * the mask value, and upper value can be extracted. These locations are then
3165 * stored in the flow profile. When adding a flow entry associated with the
3166 * flow profile, these locations will be used to quickly extract the values and
3167 * create the content of a match entry. This function should only be used for
3168 * fixed-size data structures.
3171 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3172 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Convenience wrapper: translate the bool into a match type and delegate */
3174 enum ice_flow_fld_match_type t = range ?
3175 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3177 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
/**
3181 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3182 * @seg: packet segment the field being set belongs to
3183 * @fld: field to be set
3184 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3185 * entry's input buffer
3186 * @pref_loc: location of prefix value from entry's input buffer
3187 * @pref_sz: size of the location holding the prefix value
3189 * This function specifies the locations, in the form of byte offsets from the
3190 * start of the input buffer for a flow entry, from where the value to match
3191 * and the IPv4 prefix value can be extracted. These locations are then stored
3192 * in the flow profile. When adding flow entries to the associated flow profile,
3193 * these locations can be used to quickly extract the values to create the
3194 * content of a match entry. This function should only be used for fixed-size
 * data structures.
 */
3198 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3199 u16 val_loc, u16 pref_loc, u8 pref_sz)
3201 /* For this type of field, the "mask" location is for the prefix value's
3202 * location and the "last" location is for the size of the location of
 * the prefix value.
 */
3205 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3206 pref_loc, (u16)pref_sz);
/**
3210 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3211 * @seg: packet segment the field being set belongs to
3212 * @off: offset of the raw field from the beginning of the segment in bytes
3213 * @len: length of the raw pattern to be matched
3214 * @val_loc: location of the value to match from entry's input buffer
3215 * @mask_loc: location of mask value from entry's input buffer
3217 * This function specifies the offset of the raw field to be match from the
3218 * beginning of the specified packet segment, and the locations, in the form of
3219 * byte offsets from the start of the input buffer for a flow entry, from where
3220 * the value to match and the mask value to be extracted. These locations are
3221 * then stored in the flow profile. When adding flow entries to the associated
3222 * flow profile, these locations can be used to quickly extract the values to
3223 * create the content of a match entry. This function should only be used for
3224 * fixed-size data structures.
 */
3227 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3228 u16 val_loc, u16 mask_loc)
/* Only record the raw field when there is room in the fixed-size array. */
3230 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3231 seg->raws[seg->raws_cnt].off = off;
3232 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3233 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3234 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3235 /* The "last" field is used to store the length of the field */
3236 seg->raws[seg->raws_cnt].info.src.last = len;
/* NOTE(review): the increment of seg->raws_cnt is not visible in this
 * extract — confirm against the full source.
 */
3239 /* Overflows of "raws" will be handled as an error condition later in
3240 * the flow when this information is processed.
 */
/* Protocol-header bitmasks accepted in RSS packet segments, grouped by
 * OSI layer.
 */
3245 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3246 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3248 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3249 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3251 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3252 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3253 ICE_FLOW_SEG_HDR_SCTP)
/* Union of all header bits valid for an RSS segment (checked in
 * ice_flow_set_rss_seg_info).
 */
3255 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3256 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3257 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3258 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
/**
3261 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3262 * @segs: pointer to the flow field segment(s)
3263 * @hash_fields: fields to be hashed on for the segment(s)
3264 * @flow_hdr: protocol header fields within a packet segment
3266 * Helper function to extract fields from hash bitmap and use flow
3267 * header value to set flow field segment for further use in flow
3268 * profile entry or removal.
 */
3270 static enum ice_status
/* NOTE(review): the continuation of the parameter list (the u32 flow_hdr
 * parameter used below) is not visible in this extract — restore from the
 * full source.
 */
3271 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3274 u64 val = hash_fields;
/* Register every field set in the hash bitmap on the segment. The three
 * locations are invalid on purpose: RSS hashes the field, it does not
 * match against a caller-supplied value.
 */
3277 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3278 u64 bit = BIT_ULL(i);
3281 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3282 ICE_FLOW_FLD_OFF_INVAL,
3283 ICE_FLOW_FLD_OFF_INVAL,
3284 ICE_FLOW_FLD_OFF_INVAL, false);
3288 ICE_FLOW_SET_HDRS(segs, flow_hdr);
/* Reject any protocol-header bits RSS does not support. */
3290 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3291 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3292 return ICE_ERR_PARAM;
/* At most one L3 and one L4 header type may be selected (the mask must be
 * a power of two). NOTE(review): the error-return statements after these
 * two checks are not visible in this extract — confirm against the full
 * source.
 */
3294 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3295 if (val && !ice_is_pow2(val))
3298 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3299 if (val && !ice_is_pow2(val))
/**
3306 * ice_rem_vsi_rss_list - remove VSI from RSS list
3307 * @hw: pointer to the hardware structure
3308 * @vsi_handle: software VSI handle
3310 * Remove the VSI from all RSS configurations in the list.
 */
3312 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3314 struct ice_rss_cfg *r, *tmp;
/* Nothing to do when no RSS configurations have been recorded. */
3316 if (LIST_EMPTY(&hw->rss_list_head))
/* Under the RSS lock, clear this VSI from every configuration; entries
 * left with no referencing VSIs are unlinked. NOTE(review): the statement
 * freeing the unlinked entry is not visible in this extract — confirm
 * against the full source.
 */
3319 ice_acquire_lock(&hw->rss_locks);
3320 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3321 ice_rss_cfg, l_entry)
3322 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3323 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3324 LIST_DEL(&r->l_entry);
3327 ice_release_lock(&hw->rss_locks);
/**
3331 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3332 * @hw: pointer to the hardware structure
3333 * @vsi_handle: software VSI handle
3335 * This function will iterate through all flow profiles and disassociate
3336 * the VSI from that profile. If the flow profile has no VSIs it will
 * be removed.
 */
3339 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3341 const enum ice_block blk = ICE_BLK_RSS;
3342 struct ice_flow_prof *p, *t;
3343 enum ice_status status = ICE_SUCCESS;
/* Validate the caller-supplied VSI handle before touching any state. */
3345 if (!ice_is_vsi_valid(hw, vsi_handle))
3346 return ICE_ERR_PARAM;
3348 if (LIST_EMPTY(&hw->fl_profs[blk]))
/* Disassociate the VSI from every profile that references it; a profile
 * left with no associated VSIs is removed entirely.
 */
3351 ice_acquire_lock(&hw->rss_locks);
3352 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3354 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3355 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3359 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3360 status = ice_flow_rem_prof(hw, blk, p->id);
3365 ice_release_lock(&hw->rss_locks);
/**
3371 * ice_rem_rss_list - remove RSS configuration from list
3372 * @hw: pointer to the hardware structure
3373 * @vsi_handle: software VSI handle
3374 * @prof: pointer to flow profile
3376 * Assumption: lock has already been acquired for RSS list
 */
3379 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3381 struct ice_rss_cfg *r, *tmp;
3383 /* Search for RSS hash fields associated to the VSI that match the
3384 * hash configurations associated to the flow profile. If found
3385 * remove from the RSS entry list of the VSI context and delete entry.
 */
3387 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3388 ice_rss_cfg, l_entry)
3389 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3390 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3391 ice_clear_bit(vsi_handle, r->vsis);
/* NOTE(review): the statement freeing the unlinked entry is not visible
 * in this extract — confirm against the full source.
 */
3392 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3393 LIST_DEL(&r->l_entry);
/**
3401 * ice_add_rss_list - add RSS configuration to list
3402 * @hw: pointer to the hardware structure
3403 * @vsi_handle: software VSI handle
3404 * @prof: pointer to flow profile
3406 * Assumption: lock has already been acquired for RSS list
 */
3408 static enum ice_status
3409 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3411 struct ice_rss_cfg *r, *rss_cfg;
/* If a configuration with identical hash fields and headers already
 * exists, just add this VSI to its bitmap.
 */
3413 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3414 ice_rss_cfg, l_entry)
3415 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3416 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3417 ice_set_bit(vsi_handle, r->vsis);
/* Otherwise allocate and populate a new list entry. NOTE(review): the
 * NULL check after the allocation is implied by the error return below,
 * but its 'if' line is not visible in this extract — confirm against the
 * full source.
 */
3421 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3423 return ICE_ERR_NO_MEMORY;
3425 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3426 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3427 rss_cfg->symm = prof->cfg.symm;
3428 ice_set_bit(vsi_handle, rss_cfg->vsis);
3430 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Sub-field shifts/masks of the 64-bit generated flow profile ID built by
 * ICE_FLOW_GEN_PROFID below.
 */
3435 #define ICE_FLOW_PROF_HASH_S 0
3436 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3437 #define ICE_FLOW_PROF_HDR_S 32
3438 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3439 #define ICE_FLOW_PROF_ENCAP_S 63
3440 #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
/* Segment counts: 1 = outer headers only, 2 = tunneled (inner headers). */
3442 #define ICE_RSS_OUTER_HEADERS 1
3443 #define ICE_RSS_INNER_HEADERS 2
3445 /* Flow profile ID format:
3446 * [0:31] - Packet match fields
3447 * [32:62] - Protocol header
3448 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
3450 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3451 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3452 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3453 ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
/* Program one byte lane of the GLQF_HSYMM register block for a profile so
 * the given source field-vector word is paired with the destination word
 * for symmetric hashing (0x80 appears to be the "enable" bit of the
 * programmed value — confirm against the hardware spec).
 * NOTE(review): the storage-class/return-type line and the declaration of
 * 'reg' are not visible in this extract.
 */
3456 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3458 u32 s = ((src % 4) << 3); /* byte shift */
3459 u32 v = dst | 0x80; /* value to program */
3460 u8 i = src / 4; /* register index */
/* Read-modify-write only the byte lane belonging to this source word. */
3463 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3464 reg = (reg & ~(0xff << s)) | (v << s);
3465 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Pair two 'len'-word field-vector ranges for symmetric hashing, in both
 * directions (src->dst and dst->src), one word at a time.
 * NOTE(review): the declaration line of the local initialized from the
 * expression below (presumably 'fv_last_word') is not visible in this
 * extract — confirm against the full source.
 */
3469 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3472 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3475 for (i = 0; i < len; i++) {
3476 ice_rss_config_xor_word(hw, prof_id,
3477 /* Yes, field vector in GLQF_HSYMM and
3478 * GLQF_HINSET is inversed!
 */
3480 fv_last_word - (src + i),
3481 fv_last_word - (dst + i));
3482 ice_rss_config_xor_word(hw, prof_id,
3483 fv_last_word - (dst + i),
3484 fv_last_word - (src + i));
/* ice_rss_update_symm - program symmetric-hash registers for a flow profile
 * @hw: pointer to the hardware structure
 * @prof: flow profile whose symmetric-hash configuration is applied
 *
 * Clears the profile's GLQF_HSYMM registers to their defaults, then, when
 * symmetric hashing is enabled on the profile, pairs each extracted src/dst
 * field (IPv4/IPv6 addresses, TCP/UDP/SCTP ports) via ice_rss_config_xor().
 */
3489 ice_rss_update_symm(struct ice_hw *hw,
3490 struct ice_flow_prof *prof)
3492 struct ice_prof_map *map;
/* Resolve the software profile ID to the hardware profile ID under the
 * profile-map lock. NOTE(review): the NULL check on 'map' is not visible
 * in this extract — confirm against the full source.
 */
3495 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3496 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3498 prof_id = map->prof_id;
3499 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3502 /* clear to default */
3503 for (m = 0; m < 6; m++)
3504 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3505 if (prof->cfg.symm) {
/* Extraction info for each hashable src/dst field pair in the last
 * (innermost) packet segment of the profile.
 */
3506 struct ice_flow_seg_info *seg =
3507 &prof->segs[prof->segs_cnt - 1];
3509 struct ice_flow_seg_xtrct *ipv4_src =
3510 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3511 struct ice_flow_seg_xtrct *ipv4_dst =
3512 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3513 struct ice_flow_seg_xtrct *ipv6_src =
3514 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3515 struct ice_flow_seg_xtrct *ipv6_dst =
3516 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3518 struct ice_flow_seg_xtrct *tcp_src =
3519 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3520 struct ice_flow_seg_xtrct *tcp_dst =
3521 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3523 struct ice_flow_seg_xtrct *udp_src =
3524 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3525 struct ice_flow_seg_xtrct *udp_dst =
3526 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3528 struct ice_flow_seg_xtrct *sctp_src =
3529 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3530 struct ice_flow_seg_xtrct *sctp_dst =
3531 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* Pair a src/dst couple only when both halves were actually extracted
 * into the field vector (prot_id != 0). Word counts: IPv4 address = 2,
 * IPv6 address = 8, L4 port = 1.
 */
3534 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3535 ice_rss_config_xor(hw, prof_id,
3536 ipv4_src->idx, ipv4_dst->idx, 2);
3539 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3540 ice_rss_config_xor(hw, prof_id,
3541 ipv6_src->idx, ipv6_dst->idx, 8);
3544 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3545 ice_rss_config_xor(hw, prof_id,
3546 tcp_src->idx, tcp_dst->idx, 1);
3549 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3550 ice_rss_config_xor(hw, prof_id,
3551 udp_src->idx, udp_dst->idx, 1);
3554 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3555 ice_rss_config_xor(hw, prof_id,
3556 sctp_src->idx, sctp_dst->idx, 1);
/**
3561 * ice_add_rss_cfg_sync - add an RSS configuration
3562 * @hw: pointer to the hardware structure
3563 * @vsi_handle: software VSI handle
3564 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3565 * @addl_hdrs: protocol header fields
3566 * @segs_cnt: packet segment count
3567 * @symm: symmetric hash enable/disable
3569 * Assumption: lock has already been acquired for RSS list
 *
 * NOTE(review): several control-flow lines of this function (goto labels,
 * error-path guards such as 'if (!prof)'/'if (status)') are not visible in
 * this extract; code lines below are kept byte-identical — restore the
 * missing lines from the full source.
 */
3571 static enum ice_status
3572 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3573 u32 addl_hdrs, u8 segs_cnt, bool symm)
3575 const enum ice_block blk = ICE_BLK_RSS;
3576 struct ice_flow_prof *prof = NULL;
3577 struct ice_flow_seg_info *segs;
3578 enum ice_status status;
3580 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3581 return ICE_ERR_PARAM;
/* One zero-initialized segment-info structure per packet segment. */
3583 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3586 return ICE_ERR_NO_MEMORY;
3588 /* Construct the packet segment info from the hashed fields */
3589 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3594 /* don't do RSS for GTPU outer */
3595 if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3596 segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3597 status = ICE_SUCCESS;
3601 /* Search for a flow profile that has matching headers, hash fields
3602 * and has the input VSI associated to it. If found, no further
3603 * operations required and exit.
 */
3605 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3607 ICE_FLOW_FIND_PROF_CHK_FLDS |
3608 ICE_FLOW_FIND_PROF_CHK_VSI);
3610 if (prof->cfg.symm == symm)
3612 prof->cfg.symm = symm;
3616 /* Check if a flow profile exists with the same protocol headers and
3617 * associated with the input VSI. If so disassociate the VSI from
3618 * this profile. The VSI will be added to a new profile created with
3619 * the protocol header and new hash field configuration.
 */
3621 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3622 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3624 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3626 ice_rem_rss_list(hw, vsi_handle, prof);
3630 /* Remove profile if it has no VSIs associated */
3631 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3632 status = ice_flow_rem_prof(hw, blk, prof->id);
3638 /* Search for a profile that has same match fields only. If this
3639 * exists then associate the VSI to this profile.
 */
3641 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3643 ICE_FLOW_FIND_PROF_CHK_FLDS);
3645 if (prof->cfg.symm == symm) {
3646 status = ice_flow_assoc_prof(hw, blk, prof,
3649 status = ice_add_rss_list(hw, vsi_handle,
3652 /* if a profile exists but with different symmetric
3653 * requirement, just return error.
 */
3655 status = ICE_ERR_NOT_SUPPORTED;
3660 /* Create a new flow profile with generated profile and packet
3661 * segment information.
 */
3663 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3664 ICE_FLOW_GEN_PROFID(hashed_flds,
3665 segs[segs_cnt - 1].hdrs,
3667 segs, segs_cnt, NULL, 0, &prof);
3671 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3672 /* If association to a new flow profile failed then this profile can
 * be deleted.
 */
3676 ice_flow_rem_prof(hw, blk, prof->id);
3680 status = ice_add_rss_list(hw, vsi_handle, prof);
3682 prof->cfg.symm = symm;
/* Finally push the symmetric-hash configuration to hardware. */
3685 ice_rss_update_symm(hw, prof);
/**
3693 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3694 * @hw: pointer to the hardware structure
3695 * @vsi_handle: software VSI handle
3696 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3697 * @addl_hdrs: protocol header fields
3698 * @symm: symmetric hash enable/disable
3700 * This function will generate a flow profile based on fields associated with
3701 * the input fields to hash on, the flow type and use the VSI number to add
3702 * a flow entry to the profile.
 */
3705 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3706 u32 addl_hdrs, bool symm)
3708 enum ice_status status;
/* Reject an empty hash-field set or an invalid VSI handle up front. */
3710 if (hashed_flds == ICE_HASH_INVALID ||
3711 !ice_is_vsi_valid(hw, vsi_handle))
3712 return ICE_ERR_PARAM;
/* Configure the outer headers first, then the inner (tunneled) headers,
 * all under the RSS lock. NOTE(review): the guard between the two calls
 * (presumably 'if (!status)') is not visible in this extract — confirm
 * against the full source.
 */
3714 ice_acquire_lock(&hw->rss_locks);
3715 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3716 ICE_RSS_OUTER_HEADERS, symm);
3719 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3720 addl_hdrs, ICE_RSS_INNER_HEADERS,
3722 ice_release_lock(&hw->rss_locks);
/**
3728 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3729 * @hw: pointer to the hardware structure
3730 * @vsi_handle: software VSI handle
3731 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3732 * @addl_hdrs: Protocol header fields within a packet segment
3733 * @segs_cnt: packet segment count
3735 * Assumption: lock has already been acquired for RSS list
 */
3737 static enum ice_status
3738 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3739 u32 addl_hdrs, u8 segs_cnt)
3741 const enum ice_block blk = ICE_BLK_RSS;
3742 struct ice_flow_seg_info *segs;
3743 struct ice_flow_prof *prof;
3744 enum ice_status status;
3746 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3749 return ICE_ERR_NO_MEMORY;
3751 /* Construct the packet segment info from the hashed fields */
3752 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
/* Mirror of the add path: RSS is not programmed for GTPU outer headers,
 * so there is nothing to remove for them.
 */
3757 if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3758 segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3759 status = ICE_SUCCESS;
/* Find the profile matching both the headers and the hash fields. */
3763 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3765 ICE_FLOW_FIND_PROF_CHK_FLDS);
3767 status = ICE_ERR_DOES_NOT_EXIST;
3771 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3775 /* Remove RSS configuration from VSI context before deleting
 * the flow profile.
 */
3778 ice_rem_rss_list(hw, vsi_handle, prof);
/* Delete the profile once no VSI references it any more. */
3780 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3781 status = ice_flow_rem_prof(hw, blk, prof->id);
/**
3789 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3790 * @hw: pointer to the hardware structure
3791 * @vsi_handle: software VSI handle
3792 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3793 * @addl_hdrs: Protocol header fields within a packet segment
3795 * This function will lookup the flow profile based on the input
3796 * hash field bitmap, iterate through the profile entry list of
3797 * that profile and find entry associated with input VSI to be
3798 * removed. Calls are made to underlying flow apis which will in
3799 * turn build or update buffers for RSS XLT1 section.
 */
3802 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3805 enum ice_status status;
3807 if (hashed_flds == ICE_HASH_INVALID ||
3808 !ice_is_vsi_valid(hw, vsi_handle))
3809 return ICE_ERR_PARAM;
/* Remove the outer-header configuration first, then the inner one.
 * NOTE(review): the parameter-list continuation of the signature above
 * (u32 addl_hdrs) and the guard between the two calls (presumably
 * 'if (!status)') are not visible in this extract — confirm against the
 * full source.
 */
3811 ice_acquire_lock(&hw->rss_locks);
3812 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3813 ICE_RSS_OUTER_HEADERS);
3815 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3816 addl_hdrs, ICE_RSS_INNER_HEADERS);
3817 ice_release_lock(&hw->rss_locks);
/**
3823 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3824 * @hw: pointer to the hardware structure
3825 * @vsi_handle: software VSI handle
 */
3827 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3829 enum ice_status status = ICE_SUCCESS;
3830 struct ice_rss_cfg *r;
3832 if (!ice_is_vsi_valid(hw, vsi_handle))
3833 return ICE_ERR_PARAM;
/* Re-apply every stored configuration that references this VSI, outer
 * headers first, then inner headers. NOTE(review): the argument lines
 * carrying the stored hash fields/packet headers (and the symmetric flag)
 * for both calls are not visible in this extract — confirm against the
 * full source.
 */
3835 ice_acquire_lock(&hw->rss_locks);
3836 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3837 ice_rss_cfg, l_entry) {
3838 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3839 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3842 ICE_RSS_OUTER_HEADERS,
3846 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3849 ICE_RSS_INNER_HEADERS,
3855 ice_release_lock(&hw->rss_locks);
3861 * ice_get_rss_cfg - returns hashed fields for the given header types
3862 * @hw: pointer to the hardware structure
3863 * @vsi_handle: software VSI handle
3864 * @hdrs: protocol header type
3866 * This function will return the match fields of the first instance of flow
3867 * profile having the given header types and containing input VSI
3869 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3871 u64 rss_hash = ICE_HASH_INVALID;
3872 struct ice_rss_cfg *r;
3874 /* verify if the protocol header is non zero and VSI is valid */
3875 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3876 return ICE_HASH_INVALID;
3878 ice_acquire_lock(&hw->rss_locks);
3879 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3880 ice_rss_cfg, l_entry)
3881 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3882 r->packet_hdr == hdrs) {
3883 rss_hash = r->hashed_flds;
3886 ice_release_lock(&hw->rss_locks);