/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */
5 #include "ice_common.h"
/* Size of known protocol header fields, in bytes */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
31 /* Describe properties of a protocol header field */
32 struct ice_flow_field_info {
33 enum ice_flow_seg_hdr hdr;
34 s16 off; /* Offset from start of a protocol header, in bits */
35 u16 size; /* Size of fields in bits */
36 u16 mask; /* 16-bit mask for field */
/* Initializer for an ice_flow_field_info entry with no explicit match mask
 * (mask is zero-initialized, meaning "match the full field").
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}

/* Initializer for an ice_flow_field_info entry with an explicit 16-bit mask,
 * used for fields that do not start/end on a byte boundary (DSCP, QFI, ...)
 * or that share a word with a neighboring field (TTL/protocol).
 */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
53 /* Table containing properties of supported protocol header fields */
55 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
57 /* ICE_FLOW_FIELD_IDX_ETH_DA */
58 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
59 /* ICE_FLOW_FIELD_IDX_ETH_SA */
60 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
61 /* ICE_FLOW_FIELD_IDX_S_VLAN */
62 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
63 /* ICE_FLOW_FIELD_IDX_C_VLAN */
64 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
65 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
66 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
68 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
69 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
71 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
72 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
74 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
75 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
76 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
77 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
78 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
80 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
81 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
82 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
83 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
84 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
86 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
87 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
88 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
89 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
90 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
91 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
92 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
93 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
97 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
99 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
101 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
103 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
105 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
107 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
110 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
112 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
113 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
114 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
115 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
116 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
118 /* ICE_FLOW_FIELD_IDX_ARP_OP */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
121 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
122 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
123 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
124 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
126 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
129 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
130 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
131 ICE_FLOW_FLD_SZ_GTP_TEID),
132 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
134 ICE_FLOW_FLD_SZ_GTP_TEID),
135 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
137 ICE_FLOW_FLD_SZ_GTP_TEID),
138 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
139 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
140 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
141 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
143 ICE_FLOW_FLD_SZ_GTP_TEID),
144 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
146 ICE_FLOW_FLD_SZ_GTP_TEID),
148 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
149 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
150 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
152 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
154 ICE_FLOW_FLD_SZ_PFCP_SEID),
156 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
158 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
160 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
162 ICE_FLOW_FLD_SZ_ESP_SPI),
164 /* ICE_FLOW_FIELD_IDX_AH_SPI */
165 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
166 ICE_FLOW_FLD_SZ_AH_SPI),
168 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
169 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
170 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
173 /* Bitmaps indicating relevant packet types for a particular protocol header
175 * Packet types for packets with an Outer/First/Single MAC header
177 static const u32 ice_ptypes_mac_ofos[] = {
178 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
179 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
180 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
181 0x00000000, 0x00000000, 0x00000000, 0x00000000,
182 0x00000000, 0x00000000, 0x00000000, 0x00000000,
183 0x00000000, 0x00000000, 0x00000000, 0x00000000,
184 0x00000000, 0x00000000, 0x00000000, 0x00000000,
185 0x00000000, 0x00000000, 0x00000000, 0x00000000,
188 /* Packet types for packets with an Innermost/Last MAC VLAN header */
189 static const u32 ice_ptypes_macvlan_il[] = {
190 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
191 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
192 0x00000000, 0x00000000, 0x00000000, 0x00000000,
193 0x00000000, 0x00000000, 0x00000000, 0x00000000,
194 0x00000000, 0x00000000, 0x00000000, 0x00000000,
195 0x00000000, 0x00000000, 0x00000000, 0x00000000,
196 0x00000000, 0x00000000, 0x00000000, 0x00000000,
197 0x00000000, 0x00000000, 0x00000000, 0x00000000,
200 /* Packet types for packets with an Outer/First/Single IPv4 header */
201 static const u32 ice_ptypes_ipv4_ofos[] = {
202 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
203 0x00000000, 0x00000155, 0x00000000, 0x00000000,
204 0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 0x00000000, 0x00000000, 0x00000000, 0x00000000,
208 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 0x00000000, 0x00000000, 0x00000000, 0x00000000,
212 /* Packet types for packets with an Innermost/Last IPv4 header */
213 static const u32 ice_ptypes_ipv4_il[] = {
214 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
215 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
220 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 0x00000000, 0x00000000, 0x00000000, 0x00000000,
224 /* Packet types for packets with an Outer/First/Single IPv6 header */
225 static const u32 ice_ptypes_ipv6_ofos[] = {
226 0x00000000, 0x00000000, 0x77000000, 0x10002000,
227 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
228 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 /* Packet types for packets with an Innermost/Last IPv6 header */
237 static const u32 ice_ptypes_ipv6_il[] = {
238 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
239 0x00000770, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 /* Packet types for packets with an Outermost/First ARP header */
249 static const u32 ice_ptypes_arp_of[] = {
250 0x00000800, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
260 /* UDP Packet types for non-tunneled packets or tunneled
261 * packets with inner UDP.
263 static const u32 ice_ptypes_udp_il[] = {
264 0x81000000, 0x20204040, 0x04000010, 0x80810102,
265 0x00000040, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00410000, 0x90842000, 0x00000007,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 /* Packet types for packets with an Innermost/Last TCP header */
275 static const u32 ice_ptypes_tcp_il[] = {
276 0x04000000, 0x80810102, 0x10000040, 0x02040408,
277 0x00000102, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00820000, 0x21084000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 /* Packet types for packets with an Innermost/Last SCTP header */
287 static const u32 ice_ptypes_sctp_il[] = {
288 0x08000000, 0x01020204, 0x20000081, 0x04080810,
289 0x00000204, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x01040000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 /* Packet types for packets with an Outermost/First ICMP header */
299 static const u32 ice_ptypes_icmp_of[] = {
300 0x10000000, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 /* Packet types for packets with an Innermost/Last ICMP header */
311 static const u32 ice_ptypes_icmp_il[] = {
312 0x00000000, 0x02040408, 0x40000102, 0x08101020,
313 0x00000408, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x42108000, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 /* Packet types for packets with an Outermost/First GRE header */
323 static const u32 ice_ptypes_gre_of[] = {
324 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
325 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 /* Packet types for packets with an Innermost/Last MAC header */
335 static const u32 ice_ptypes_mac_il[] = {
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 /* Packet types for GTPC */
347 static const u32 ice_ptypes_gtpc[] = {
348 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000180, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 /* Packet types for GTPC with TEID */
359 static const u32 ice_ptypes_gtpc_tid[] = {
360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000060, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 0x00000000, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 /* Packet types for GTPU */
371 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
372 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
373 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
374 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
375 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
376 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
377 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
378 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
379 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
380 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
381 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
382 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
383 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
384 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
385 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
386 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
387 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
388 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
389 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
390 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
391 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
394 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
395 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
396 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
397 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
398 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
399 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
400 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
401 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
402 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
403 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
404 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
405 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
406 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
407 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
408 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
409 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
410 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
411 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
412 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
413 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
414 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
417 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
418 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
419 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
420 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
421 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
422 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
423 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
424 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
425 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
426 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
427 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
428 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
429 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
430 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
431 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
432 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
433 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
434 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
435 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
436 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
437 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
440 static const u32 ice_ptypes_gtpu[] = {
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 /* Packet types for pppoe */
452 static const u32 ice_ptypes_pppoe[] = {
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 0x00000000, 0x00000000, 0x00000000, 0x00000000,
459 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 /* Packet types for packets with PFCP NODE header */
464 static const u32 ice_ptypes_pfcp_node[] = {
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x80000000, 0x00000002,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 0x00000000, 0x00000000, 0x00000000, 0x00000000,
471 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 /* Packet types for packets with PFCP SESSION header */
476 static const u32 ice_ptypes_pfcp_session[] = {
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000005,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 /* Packet types for l2tpv3 */
488 static const u32 ice_ptypes_l2tpv3[] = {
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000300,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 /* Packet types for esp */
500 static const u32 ice_ptypes_esp[] = {
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000003, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
506 0x00000000, 0x00000000, 0x00000000, 0x00000000,
507 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 /* Packet types for ah */
512 static const u32 ice_ptypes_ah[] = {
513 0x00000000, 0x00000000, 0x00000000, 0x00000000,
514 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
515 0x00000000, 0x00000000, 0x00000000, 0x00000000,
516 0x00000000, 0x00000000, 0x00000000, 0x00000000,
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
518 0x00000000, 0x00000000, 0x00000000, 0x00000000,
519 0x00000000, 0x00000000, 0x00000000, 0x00000000,
520 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 /* Packet types for packets with NAT_T ESP header */
524 static const u32 ice_ptypes_nat_t_esp[] = {
525 0x00000000, 0x00000000, 0x00000000, 0x00000000,
526 0x00000000, 0x00000030, 0x00000000, 0x00000000,
527 0x00000000, 0x00000000, 0x00000000, 0x00000000,
528 0x00000000, 0x00000000, 0x00000000, 0x00000000,
529 0x00000000, 0x00000000, 0x00000000, 0x00000000,
530 0x00000000, 0x00000000, 0x00000000, 0x00000000,
531 0x00000000, 0x00000000, 0x00000000, 0x00000000,
532 0x00000000, 0x00000000, 0x00000000, 0x00000000,
535 /* Manage parameters and info. used during the creation of a flow profile */
536 struct ice_flow_prof_params {
538 u16 entry_length; /* # of bytes formatted entry will require */
540 struct ice_flow_prof *prof;
542 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
543 * This will give us the direction flags.
545 struct ice_fv_word es[ICE_MAX_FV_WORDS];
546 /* attributes can be used to add attributes to a particular PTYPE */
547 const struct ice_ptype_attributes *attr;
550 u16 mask[ICE_MAX_FV_WORDS];
551 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Header groups that only make sense on the inner-most segment of a flow */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* Masks grouping the mutually-exclusive headers of each layer */
#define ICE_FLOW_SEG_HDRS_L2_MASK \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
571 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
572 * @segs: array of one or more packet segments that describe the flow
573 * @segs_cnt: number of packet segments provided
575 static enum ice_status
576 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
580 for (i = 0; i < segs_cnt; i++) {
581 /* Multiple L3 headers */
582 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
583 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
584 return ICE_ERR_PARAM;
586 /* Multiple L4 headers */
587 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
588 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
589 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
607 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
608 * @params: information about the flow to be processed
609 * @seg: index of packet segment whose header size is to be determined
611 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
616 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
617 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
620 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
621 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
622 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
623 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
624 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
625 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
626 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
627 /* A L3 header is required if L4 is specified */
631 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
632 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
633 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
634 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
635 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
636 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
637 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
638 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
644 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
645 * @params: information about the flow to be processed
647 * This function identifies the packet types associated with the protocol
648 * headers being present in packet segments of the specified flow profile.
650 static enum ice_status
651 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
653 struct ice_flow_prof *prof;
656 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
661 for (i = 0; i < params->prof->segs_cnt; i++) {
662 const ice_bitmap_t *src;
665 hdrs = prof->segs[i].hdrs;
667 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
668 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
669 (const ice_bitmap_t *)ice_ptypes_mac_il;
670 ice_and_bitmap(params->ptypes, params->ptypes, src,
674 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
675 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
676 ice_and_bitmap(params->ptypes, params->ptypes, src,
680 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
681 ice_and_bitmap(params->ptypes, params->ptypes,
682 (const ice_bitmap_t *)ice_ptypes_arp_of,
686 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
687 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
688 ice_and_bitmap(params->ptypes, params->ptypes, src,
692 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
693 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
694 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
695 ice_and_bitmap(params->ptypes, params->ptypes, src,
697 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
698 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
699 ice_and_bitmap(params->ptypes,
702 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
703 ice_and_bitmap(params->ptypes, params->ptypes,
704 (const ice_bitmap_t *)
707 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
708 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
709 ice_and_bitmap(params->ptypes, params->ptypes,
710 src, ICE_FLOW_PTYPE_MAX);
712 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
713 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
714 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
715 ice_and_bitmap(params->ptypes, params->ptypes, src,
717 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
718 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
719 ice_and_bitmap(params->ptypes,
722 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
723 ice_and_bitmap(params->ptypes, params->ptypes,
724 (const ice_bitmap_t *)
727 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
728 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
729 ice_and_bitmap(params->ptypes, params->ptypes,
730 src, ICE_FLOW_PTYPE_MAX);
734 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
735 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
736 (const ice_bitmap_t *)ice_ptypes_icmp_il;
737 ice_and_bitmap(params->ptypes, params->ptypes, src,
739 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
741 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
742 ice_and_bitmap(params->ptypes, params->ptypes,
743 src, ICE_FLOW_PTYPE_MAX);
745 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
746 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
747 ice_and_bitmap(params->ptypes, params->ptypes,
748 src, ICE_FLOW_PTYPE_MAX);
749 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
750 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
751 ice_and_bitmap(params->ptypes, params->ptypes,
752 src, ICE_FLOW_PTYPE_MAX);
753 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
754 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
755 ice_and_bitmap(params->ptypes, params->ptypes,
756 src, ICE_FLOW_PTYPE_MAX);
758 /* Attributes for GTP packet with downlink */
759 params->attr = ice_attr_gtpu_down;
760 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
761 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
762 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
763 ice_and_bitmap(params->ptypes, params->ptypes,
764 src, ICE_FLOW_PTYPE_MAX);
766 /* Attributes for GTP packet with uplink */
767 params->attr = ice_attr_gtpu_up;
768 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
769 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
770 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
771 ice_and_bitmap(params->ptypes, params->ptypes,
772 src, ICE_FLOW_PTYPE_MAX);
774 /* Attributes for GTP packet with Extension Header */
775 params->attr = ice_attr_gtpu_eh;
776 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
777 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
778 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
779 ice_and_bitmap(params->ptypes, params->ptypes,
780 src, ICE_FLOW_PTYPE_MAX);
781 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
782 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
783 ice_and_bitmap(params->ptypes, params->ptypes,
784 src, ICE_FLOW_PTYPE_MAX);
785 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
786 src = (const ice_bitmap_t *)ice_ptypes_esp;
787 ice_and_bitmap(params->ptypes, params->ptypes,
788 src, ICE_FLOW_PTYPE_MAX);
789 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
790 src = (const ice_bitmap_t *)ice_ptypes_ah;
791 ice_and_bitmap(params->ptypes, params->ptypes,
792 src, ICE_FLOW_PTYPE_MAX);
793 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
794 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
795 ice_and_bitmap(params->ptypes, params->ptypes,
796 src, ICE_FLOW_PTYPE_MAX);
799 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
800 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
802 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
805 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
807 ice_and_bitmap(params->ptypes, params->ptypes,
808 src, ICE_FLOW_PTYPE_MAX);
810 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
811 ice_andnot_bitmap(params->ptypes, params->ptypes,
812 src, ICE_FLOW_PTYPE_MAX);
814 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
815 ice_andnot_bitmap(params->ptypes, params->ptypes,
816 src, ICE_FLOW_PTYPE_MAX);
824 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
825 * @hw: pointer to the HW struct
826 * @params: information about the flow to be processed
827 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
829 * This function will allocate an extraction sequence entries for a DWORD size
830 * chunk of the packet flags.
832 static enum ice_status
833 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
834 struct ice_flow_prof_params *params,
835 enum ice_flex_mdid_pkt_flags flags)
837 u8 fv_words = hw->blk[params->blk].es.fvw;
840 /* Make sure the number of extraction sequence entries required does not
841 * exceed the block's capacity.
843 if (params->es_cnt >= fv_words)
844 return ICE_ERR_MAX_LIMIT;
846 /* some blocks require a reversed field vector layout */
847 if (hw->blk[params->blk].es.reverse)
848 idx = fv_words - params->es_cnt - 1;
850 idx = params->es_cnt;
852 params->es[idx].prot_id = ICE_PROT_META_ID;
853 params->es[idx].off = flags;
860 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
861 * @hw: pointer to the HW struct
862 * @params: information about the flow to be processed
863 * @seg: packet segment index of the field to be extracted
864 * @fld: ID of field to be extracted
865 * @match: bitfield of all fields
867 * This function determines the protocol ID, offset, and size of the given
868 * field. It then allocates one or more extraction sequence entries for the
869 * given field, and fill the entries with protocol ID and offset information.
871 static enum ice_status
872 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
873 u8 seg, enum ice_flow_field fld, u64 match)
875 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
876 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
877 u8 fv_words = hw->blk[params->blk].es.fvw;
878 struct ice_flow_fld_info *flds;
879 u16 cnt, ese_bits, i;
885 flds = params->prof->segs[seg].fields;
888 case ICE_FLOW_FIELD_IDX_ETH_DA:
889 case ICE_FLOW_FIELD_IDX_ETH_SA:
890 case ICE_FLOW_FIELD_IDX_S_VLAN:
891 case ICE_FLOW_FIELD_IDX_C_VLAN:
892 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
894 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
895 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
897 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
898 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
900 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
901 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
903 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
904 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
905 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
907 /* TTL and PROT share the same extraction seq. entry.
908 * Each is considered a sibling to the other in terms of sharing
909 * the same extraction sequence entry.
911 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
912 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
913 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
914 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
916 /* If the sibling field is also included, that field's
917 * mask needs to be included.
919 if (match & BIT(sib))
920 sib_mask = ice_flds_info[sib].mask;
922 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
923 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
924 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
926 /* TTL and PROT share the same extraction seq. entry.
927 * Each is considered a sibling to the other in terms of sharing
928 * the same extraction sequence entry.
930 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
931 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
932 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
933 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
935 /* If the sibling field is also included, that field's
936 * mask needs to be included.
938 if (match & BIT(sib))
939 sib_mask = ice_flds_info[sib].mask;
941 case ICE_FLOW_FIELD_IDX_IPV4_SA:
942 case ICE_FLOW_FIELD_IDX_IPV4_DA:
943 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
945 case ICE_FLOW_FIELD_IDX_IPV6_SA:
946 case ICE_FLOW_FIELD_IDX_IPV6_DA:
947 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
949 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
950 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
951 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
952 prot_id = ICE_PROT_TCP_IL;
954 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
955 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
956 prot_id = ICE_PROT_UDP_IL_OR_S;
958 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
959 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
960 prot_id = ICE_PROT_SCTP_IL;
962 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
963 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
964 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
965 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
966 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
967 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
968 /* GTP is accessed through UDP OF protocol */
969 prot_id = ICE_PROT_UDP_OF;
971 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
972 prot_id = ICE_PROT_PPPOE;
974 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
975 prot_id = ICE_PROT_UDP_IL_OR_S;
977 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
978 prot_id = ICE_PROT_L2TPV3;
980 case ICE_FLOW_FIELD_IDX_ESP_SPI:
981 prot_id = ICE_PROT_ESP_F;
983 case ICE_FLOW_FIELD_IDX_AH_SPI:
984 prot_id = ICE_PROT_ESP_2;
986 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
987 prot_id = ICE_PROT_UDP_IL_OR_S;
989 case ICE_FLOW_FIELD_IDX_ARP_SIP:
990 case ICE_FLOW_FIELD_IDX_ARP_DIP:
991 case ICE_FLOW_FIELD_IDX_ARP_SHA:
992 case ICE_FLOW_FIELD_IDX_ARP_DHA:
993 case ICE_FLOW_FIELD_IDX_ARP_OP:
994 prot_id = ICE_PROT_ARP_OF;
996 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
997 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
998 /* ICMP type and code share the same extraction seq. entry */
999 prot_id = (params->prof->segs[seg].hdrs &
1000 ICE_FLOW_SEG_HDR_IPV4) ?
1001 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1002 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1003 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1004 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1006 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1007 prot_id = ICE_PROT_GRE_OF;
1010 return ICE_ERR_NOT_IMPL;
1013 /* Each extraction sequence entry is a word in size, and extracts a
1014 * word-aligned offset from a protocol header.
1016 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1018 flds[fld].xtrct.prot_id = prot_id;
1019 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1020 ICE_FLOW_FV_EXTRACT_SZ;
1021 flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1022 flds[fld].xtrct.idx = params->es_cnt;
1023 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1025 /* Adjust the next field-entry index after accommodating the number of
1026 * entries this field consumes
1028 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1029 ice_flds_info[fld].size, ese_bits);
1031 /* Fill in the extraction sequence entries needed for this field */
1032 off = flds[fld].xtrct.off;
1033 mask = flds[fld].xtrct.mask;
1034 for (i = 0; i < cnt; i++) {
1035 /* Only consume an extraction sequence entry if there is no
1036 * sibling field associated with this field or the sibling entry
1037 * already extracts the word shared with this field.
1039 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1040 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1041 flds[sib].xtrct.off != off) {
1044 /* Make sure the number of extraction sequence required
1045 * does not exceed the block's capability
1047 if (params->es_cnt >= fv_words)
1048 return ICE_ERR_MAX_LIMIT;
1050 /* some blocks require a reversed field vector layout */
1051 if (hw->blk[params->blk].es.reverse)
1052 idx = fv_words - params->es_cnt - 1;
1054 idx = params->es_cnt;
1056 params->es[idx].prot_id = prot_id;
1057 params->es[idx].off = off;
1058 params->mask[idx] = mask | sib_mask;
1062 off += ICE_FLOW_FV_EXTRACT_SZ;
1069 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1070 * @hw: pointer to the HW struct
1071 * @params: information about the flow to be processed
1072 * @seg: index of packet segment whose raw fields are to be be extracted
1074 static enum ice_status
1075 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1082 if (!params->prof->segs[seg].raws_cnt)
1085 if (params->prof->segs[seg].raws_cnt >
1086 ARRAY_SIZE(params->prof->segs[seg].raws))
1087 return ICE_ERR_MAX_LIMIT;
1089 /* Offsets within the segment headers are not supported */
1090 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1092 return ICE_ERR_PARAM;
1094 fv_words = hw->blk[params->blk].es.fvw;
1096 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1097 struct ice_flow_seg_fld_raw *raw;
1100 raw = ¶ms->prof->segs[seg].raws[i];
1102 /* Storing extraction information */
1103 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1104 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1105 ICE_FLOW_FV_EXTRACT_SZ;
1106 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1108 raw->info.xtrct.idx = params->es_cnt;
1110 /* Determine the number of field vector entries this raw field
1113 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1114 (raw->info.src.last * BITS_PER_BYTE),
1115 (ICE_FLOW_FV_EXTRACT_SZ *
1117 off = raw->info.xtrct.off;
1118 for (j = 0; j < cnt; j++) {
1121 /* Make sure the number of extraction sequence required
1122 * does not exceed the block's capability
1124 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1125 params->es_cnt >= ICE_MAX_FV_WORDS)
1126 return ICE_ERR_MAX_LIMIT;
1128 /* some blocks require a reversed field vector layout */
1129 if (hw->blk[params->blk].es.reverse)
1130 idx = fv_words - params->es_cnt - 1;
1132 idx = params->es_cnt;
1134 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1135 params->es[idx].off = off;
1137 off += ICE_FLOW_FV_EXTRACT_SZ;
1145 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1146 * @hw: pointer to the HW struct
1147 * @params: information about the flow to be processed
1149 * This function iterates through all matched fields in the given segments, and
1150 * creates an extraction sequence for the fields.
1152 static enum ice_status
1153 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1154 struct ice_flow_prof_params *params)
1156 enum ice_status status = ICE_SUCCESS;
1159 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1162 if (params->blk == ICE_BLK_ACL) {
1163 status = ice_flow_xtract_pkt_flags(hw, params,
1164 ICE_RX_MDID_PKT_FLAGS_15_0);
1169 for (i = 0; i < params->prof->segs_cnt; i++) {
1170 u64 match = params->prof->segs[i].match;
1171 enum ice_flow_field j;
1173 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1174 const u64 bit = BIT_ULL(j);
1177 status = ice_flow_xtract_fld(hw, params, i, j,
1185 /* Process raw matching bytes */
1186 status = ice_flow_xtract_raws(hw, params, i);
1195 * ice_flow_sel_acl_scen - returns the specific scenario
1196 * @hw: pointer to the hardware structure
1197 * @params: information about the flow to be processed
1199 * This function will return the specific scenario based on the
1200 * params passed to it
1202 static enum ice_status
1203 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1205 /* Find the best-fit scenario for the provided match width */
1206 struct ice_acl_scen *cand_scen = NULL, *scen;
1209 return ICE_ERR_DOES_NOT_EXIST;
1211 /* Loop through each scenario and match against the scenario width
1212 * to select the specific scenario
1214 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1215 if (scen->eff_width >= params->entry_length &&
1216 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1219 return ICE_ERR_DOES_NOT_EXIST;
1221 params->prof->cfg.scen = cand_scen;
1227 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1228 * @params: information about the flow to be processed
1230 static enum ice_status
1231 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1233 u16 index, i, range_idx = 0;
1235 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1237 for (i = 0; i < params->prof->segs_cnt; i++) {
1238 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1239 u64 match = seg->match;
1242 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1243 struct ice_flow_fld_info *fld;
1244 const u64 bit = BIT_ULL(j);
1249 fld = &seg->fields[j];
1250 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1252 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1253 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1255 /* Range checking only supported for single
1258 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1260 BITS_PER_BYTE * 2) > 1)
1261 return ICE_ERR_PARAM;
1263 /* Ranges must define low and high values */
1264 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1265 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1266 return ICE_ERR_PARAM;
1268 fld->entry.val = range_idx++;
1270 /* Store adjusted byte-length of field for later
1271 * use, taking into account potential
1272 * non-byte-aligned displacement
1274 fld->entry.last = DIVIDE_AND_ROUND_UP
1275 (ice_flds_info[j].size +
1276 (fld->xtrct.disp % BITS_PER_BYTE),
1278 fld->entry.val = index;
1279 index += fld->entry.last;
1285 for (j = 0; j < seg->raws_cnt; j++) {
1286 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1288 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1289 raw->info.entry.val = index;
1290 raw->info.entry.last = raw->info.src.last;
1291 index += raw->info.entry.last;
1295 /* Currently only support using the byte selection base, which only
1296 * allows for an effective entry size of 30 bytes. Reject anything
1299 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1300 return ICE_ERR_PARAM;
1302 /* Only 8 range checkers per profile, reject anything trying to use
1305 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1306 return ICE_ERR_PARAM;
1308 /* Store # bytes required for entry for later use */
1309 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1315 * ice_flow_proc_segs - process all packet segments associated with a profile
1316 * @hw: pointer to the HW struct
1317 * @params: information about the flow to be processed
1319 static enum ice_status
1320 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1322 enum ice_status status;
1324 status = ice_flow_proc_seg_hdrs(params);
1328 status = ice_flow_create_xtrct_seq(hw, params);
1332 switch (params->blk) {
1335 status = ICE_SUCCESS;
1338 status = ice_flow_acl_def_entry_frmt(params);
1341 status = ice_flow_sel_acl_scen(hw, params);
1347 return ICE_ERR_NOT_IMPL;
/* Conditions checked by ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
1358 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1359 * @hw: pointer to the HW struct
1360 * @blk: classification stage
1361 * @dir: flow direction
1362 * @segs: array of one or more packet segments that describe the flow
1363 * @segs_cnt: number of packet segments provided
1364 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1365 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1367 static struct ice_flow_prof *
1368 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1369 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1370 u8 segs_cnt, u16 vsi_handle, u32 conds)
1372 struct ice_flow_prof *p, *prof = NULL;
1374 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1375 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1376 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1377 segs_cnt && segs_cnt == p->segs_cnt) {
1380 /* Check for profile-VSI association if specified */
1381 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1382 ice_is_vsi_valid(hw, vsi_handle) &&
1383 !ice_is_bit_set(p->vsis, vsi_handle))
1386 /* Protocol headers must be checked. Matched fields are
1387 * checked if specified.
1389 for (i = 0; i < segs_cnt; i++)
1390 if (segs[i].hdrs != p->segs[i].hdrs ||
1391 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1392 segs[i].match != p->segs[i].match))
1395 /* A match is found if all segments are matched */
1396 if (i == segs_cnt) {
1402 ice_release_lock(&hw->fl_profs_locks[blk]);
1408 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1409 * @hw: pointer to the HW struct
1410 * @blk: classification stage
1411 * @dir: flow direction
1412 * @segs: array of one or more packet segments that describe the flow
1413 * @segs_cnt: number of packet segments provided
1416 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1417 struct ice_flow_seg_info *segs, u8 segs_cnt)
1419 struct ice_flow_prof *p;
1421 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1422 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1424 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1428 * ice_flow_find_prof_id - Look up a profile with given profile ID
1429 * @hw: pointer to the HW struct
1430 * @blk: classification stage
1431 * @prof_id: unique ID to identify this flow profile
1433 static struct ice_flow_prof *
1434 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1436 struct ice_flow_prof *p;
1438 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1439 if (p->id == prof_id)
1447 * ice_dealloc_flow_entry - Deallocate flow entry memory
1448 * @hw: pointer to the HW struct
1449 * @entry: flow entry to be removed
1452 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1458 ice_free(hw, entry->entry);
1460 if (entry->range_buf) {
1461 ice_free(hw, entry->range_buf);
1462 entry->range_buf = NULL;
1466 ice_free(hw, entry->acts);
1468 entry->acts_cnt = 0;
1471 ice_free(hw, entry);
/* Scenario value meaning "no scenario configured" in the ACL profile */
#define ICE_ACL_INVALID_SCEN	0x3f
1477 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1478 * @hw: pointer to the hardware structure
1479 * @prof: pointer to flow profile
1480 * @buf: destination buffer function writes partial xtrct sequence to
1482 * returns ICE_SUCCESS if no pf is associated to the given profile
1483 * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1484 * returns other error code for real error
1486 static enum ice_status
1487 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1488 struct ice_aqc_acl_prof_generic_frmt *buf)
1490 enum ice_status status;
1493 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1497 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1501 /* If all pf's associated scenarios are all 0 or all
1502 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1503 * not been configured yet.
1505 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1506 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1507 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1508 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1511 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1512 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1513 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1514 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1515 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1516 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1517 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1518 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1521 return ICE_ERR_IN_USE;
1525 * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1526 * @hw: pointer to the hardware structure
1527 * @acts: array of actions to be performed on a match
1528 * @acts_cnt: number of actions
1530 static enum ice_status
1531 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1536 for (i = 0; i < acts_cnt; i++) {
1537 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1538 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1539 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1540 struct ice_acl_cntrs cntrs;
1541 enum ice_status status;
1543 cntrs.bank = 0; /* Only bank0 for the moment */
1545 LE16_TO_CPU(acts[i].data.acl_act.value);
1547 LE16_TO_CPU(acts[i].data.acl_act.value);
1549 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1550 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1552 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1554 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1563 * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
1564 * @hw: pointer to the hardware structure
1565 * @prof: pointer to flow profile
1567 * Disassociate the scenario to the Profile for the PF of the VSI.
1569 static enum ice_status
1570 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1572 struct ice_aqc_acl_prof_generic_frmt buf;
1573 enum ice_status status = ICE_SUCCESS;
1576 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1578 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1582 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1586 /* Clear scenario for this pf */
1587 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1588 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1594 * ice_flow_rem_entry_sync - Remove a flow entry
1595 * @hw: pointer to the HW struct
1596 * @blk: classification stage
1597 * @entry: flow entry to be removed
1599 static enum ice_status
1600 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1601 struct ice_flow_entry *entry)
1604 return ICE_ERR_BAD_PTR;
1606 if (blk == ICE_BLK_ACL) {
1607 enum ice_status status;
1610 return ICE_ERR_BAD_PTR;
1612 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1613 entry->scen_entry_idx);
1617 /* Checks if we need to release an ACL counter. */
1618 if (entry->acts_cnt && entry->acts)
1619 ice_flow_acl_free_act_cntr(hw, entry->acts,
1623 LIST_DEL(&entry->l_entry);
1625 ice_dealloc_flow_entry(hw, entry);
1631 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1632 * @hw: pointer to the HW struct
1633 * @blk: classification stage
1634 * @dir: flow direction
1635 * @prof_id: unique ID to identify this flow profile
1636 * @segs: array of one or more packet segments that describe the flow
1637 * @segs_cnt: number of packet segments provided
1638 * @acts: array of default actions
1639 * @acts_cnt: number of default actions
1640 * @prof: stores the returned flow profile added
1642 * Assumption: the caller has acquired the lock to the profile list
1644 static enum ice_status
1645 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1646 enum ice_flow_dir dir, u64 prof_id,
1647 struct ice_flow_seg_info *segs, u8 segs_cnt,
1648 struct ice_flow_action *acts, u8 acts_cnt,
1649 struct ice_flow_prof **prof)
1651 struct ice_flow_prof_params params;
1652 enum ice_status status;
1655 if (!prof || (acts_cnt && !acts))
1656 return ICE_ERR_BAD_PTR;
1658 ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
1659 params.prof = (struct ice_flow_prof *)
1660 ice_malloc(hw, sizeof(*params.prof));
1662 return ICE_ERR_NO_MEMORY;
1664 /* initialize extraction sequence to all invalid (0xff) */
1665 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1666 params.es[i].prot_id = ICE_PROT_INVALID;
1667 params.es[i].off = ICE_FV_OFFSET_INVAL;
1671 params.prof->id = prof_id;
1672 params.prof->dir = dir;
1673 params.prof->segs_cnt = segs_cnt;
1675 /* Make a copy of the segments that need to be persistent in the flow
1678 for (i = 0; i < segs_cnt; i++)
1679 ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs),
1680 ICE_NONDMA_TO_NONDMA);
1682 /* Make a copy of the actions that need to be persistent in the flow
1686 params.prof->acts = (struct ice_flow_action *)
1687 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1688 ICE_NONDMA_TO_NONDMA);
1690 if (!params.prof->acts) {
1691 status = ICE_ERR_NO_MEMORY;
1696 status = ice_flow_proc_segs(hw, ¶ms);
1698 ice_debug(hw, ICE_DBG_FLOW,
1699 "Error processing a flow's packet segments\n");
1703 /* Add a HW profile for this flow profile */
1704 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1705 params.attr, params.attr_cnt, params.es,
1708 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1712 INIT_LIST_HEAD(¶ms.prof->entries);
1713 ice_init_lock(¶ms.prof->entries_lock);
1714 *prof = params.prof;
1718 if (params.prof->acts)
1719 ice_free(hw, params.prof->acts);
1720 ice_free(hw, params.prof);
1727 * ice_flow_rem_prof_sync - remove a flow profile
1728 * @hw: pointer to the hardware structure
1729 * @blk: classification stage
1730 * @prof: pointer to flow profile to remove
1732 * Assumption: the caller has acquired the lock to the profile list
1734 static enum ice_status
1735 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1736 struct ice_flow_prof *prof)
1738 enum ice_status status;
1740 /* Remove all remaining flow entries before removing the flow profile */
1741 if (!LIST_EMPTY(&prof->entries)) {
1742 struct ice_flow_entry *e, *t;
1744 ice_acquire_lock(&prof->entries_lock);
1746 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1748 status = ice_flow_rem_entry_sync(hw, blk, e);
1753 ice_release_lock(&prof->entries_lock);
1756 if (blk == ICE_BLK_ACL) {
1757 struct ice_aqc_acl_profile_ranges query_rng_buf;
1758 struct ice_aqc_acl_prof_generic_frmt buf;
1761 /* Deassociate the scenario to the Profile for the PF */
1762 status = ice_flow_acl_disassoc_scen(hw, prof);
1766 /* Clear the range-checker if the profile ID is no longer
1769 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1770 if (status && status != ICE_ERR_IN_USE) {
1772 } else if (!status) {
1773 /* Clear the range-checker value for profile ID */
1774 ice_memset(&query_rng_buf, 0,
1775 sizeof(struct ice_aqc_acl_profile_ranges),
1778 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1783 status = ice_prog_acl_prof_ranges(hw, prof_id,
1784 &query_rng_buf, NULL);
1790 /* Remove all hardware profiles associated with this flow profile */
1791 status = ice_rem_prof(hw, blk, prof->id);
1793 LIST_DEL(&prof->l_entry);
1794 ice_destroy_lock(&prof->entries_lock);
1796 ice_free(hw, prof->acts);
1804 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1805 * @buf: Destination buffer function writes partial xtrct sequence to
1806 * @info: Info about field
1809 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1810 struct ice_flow_fld_info *info)
1815 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1816 info->xtrct.disp / BITS_PER_BYTE;
1817 dst = info->entry.val;
1818 for (i = 0; i < info->entry.last; i++)
1819 /* HW stores field vector words in LE, convert words back to BE
1820 * so constructed entries will end up in network order
1822 buf->byte_selection[dst++] = src++ ^ 1;
1826 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1827 * @hw: pointer to the hardware structure
1828 * @prof: pointer to flow profile
1830 static enum ice_status
1831 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1833 struct ice_aqc_acl_prof_generic_frmt buf;
1834 struct ice_flow_fld_info *info;
1835 enum ice_status status;
1839 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1841 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1845 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1846 if (status && status != ICE_ERR_IN_USE)
1850 /* Program the profile dependent configuration. This is done
1851 * only once regardless of the number of PFs using that profile
1853 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1855 for (i = 0; i < prof->segs_cnt; i++) {
1856 struct ice_flow_seg_info *seg = &prof->segs[i];
1857 u64 match = seg->match;
1860 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1861 const u64 bit = BIT_ULL(j);
1866 info = &seg->fields[j];
1868 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1869 buf.word_selection[info->entry.val] =
1872 ice_flow_acl_set_xtrct_seq_fld(&buf,
1878 for (j = 0; j < seg->raws_cnt; j++) {
1879 info = &seg->raws[j].info;
1880 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
1884 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1885 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1889 /* Update the current PF */
1890 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1891 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1897 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1898 * @hw: pointer to the hardware structure
1899 * @blk: classification stage
1900 * @vsi_handle: software VSI handle
1901 * @vsig: target VSI group
1903 * Assumption: the caller has already verified that the VSI to
1904 * be added has the same characteristics as the VSIG and will
1905 * thereby have access to all resources added to that VSIG.
1908 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1911 enum ice_status status;
1913 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1914 return ICE_ERR_PARAM;
1916 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1917 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1919 ice_release_lock(&hw->fl_profs_locks[blk]);
1925 * ice_flow_assoc_prof - associate a VSI with a flow profile
1926 * @hw: pointer to the hardware structure
1927 * @blk: classification stage
1928 * @prof: pointer to flow profile
1929 * @vsi_handle: software VSI handle
1931 * Assumption: the caller has acquired the lock to the profile list
1932 * and the software VSI handle has been validated
1934 static enum ice_status
1935 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1936 struct ice_flow_prof *prof, u16 vsi_handle)
1938 enum ice_status status = ICE_SUCCESS;
1940 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1941 if (blk == ICE_BLK_ACL) {
1942 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1946 status = ice_add_prof_id_flow(hw, blk,
1947 ice_get_hw_vsi_num(hw,
1951 ice_set_bit(vsi_handle, prof->vsis);
1953 ice_debug(hw, ICE_DBG_FLOW,
1954 "HW profile add failed, %d\n",
1962 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1963 * @hw: pointer to the hardware structure
1964 * @blk: classification stage
1965 * @prof: pointer to flow profile
1966 * @vsi_handle: software VSI handle
1968 * Assumption: the caller has acquired the lock to the profile list
1969 * and the software VSI handle has been validated
1971 static enum ice_status
1972 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1973 struct ice_flow_prof *prof, u16 vsi_handle)
1975 enum ice_status status = ICE_SUCCESS;
1977 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1978 status = ice_rem_prof_id_flow(hw, blk,
1979 ice_get_hw_vsi_num(hw,
1983 ice_clear_bit(vsi_handle, prof->vsis);
1985 ice_debug(hw, ICE_DBG_FLOW,
1986 "HW profile remove failed, %d\n",
1994 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1995 * @hw: pointer to the HW struct
1996 * @blk: classification stage
1997 * @dir: flow direction
1998 * @prof_id: unique ID to identify this flow profile
1999 * @segs: array of one or more packet segments that describe the flow
2000 * @segs_cnt: number of packet segments provided
2001 * @acts: array of default actions
2002 * @acts_cnt: number of default actions
2003 * @prof: stores the returned flow profile added
2006 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2007 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2008 struct ice_flow_action *acts, u8 acts_cnt,
2009 struct ice_flow_prof **prof)
2011 enum ice_status status;
/* Validate inputs before taking the per-block lock: segment count cap,
 * then (in lines not visible here) parameter/pointer checks.
 */
2013 if (segs_cnt > ICE_FLOW_SEG_MAX)
2014 return ICE_ERR_MAX_LIMIT;
2017 return ICE_ERR_PARAM;
2020 return ICE_ERR_BAD_PTR;
/* Verify the protocol-header layout of the segments is supportable */
2022 status = ice_flow_val_hdrs(segs, segs_cnt);
/* Profile creation and list insertion are serialized by the per-block
 * flow-profile lock.
 */
2026 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2028 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2029 acts, acts_cnt, prof);
2031 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2033 ice_release_lock(&hw->fl_profs_locks[blk]);
2039 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2040 * @hw: pointer to the HW struct
2041 * @blk: the block for which the flow profile is to be removed
2042 * @prof_id: unique ID of the flow profile to be removed
2045 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2047 struct ice_flow_prof *prof;
2048 enum ice_status status;
/* Lookup and removal must both happen under the per-block lock so the
 * profile cannot disappear between the find and the remove.
 */
2050 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2052 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2054 status = ICE_ERR_DOES_NOT_EXIST;
2058 /* prof becomes invalid after the call */
2059 status = ice_flow_rem_prof_sync(hw, blk, prof);
2062 ice_release_lock(&hw->fl_profs_locks[blk]);
2068 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2069 * @hw: pointer to the HW struct
2070 * @blk: classification stage
2071 * @prof_id: the profile ID handle
2072 * @hw_prof_id: pointer to variable to receive the HW profile ID
2075 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2078 struct ice_prof_map *map;
/* Translate the caller's SW profile ID handle into the HW profile ID
 * via the profile map; fails when no mapping exists.
 */
2080 map = ice_search_prof_id(hw, blk, prof_id);
2082 *hw_prof_id = map->prof_id;
2086 return ICE_ERR_DOES_NOT_EXIST;
2090 * ice_flow_find_entry - look for a flow entry using its unique ID
2091 * @hw: pointer to the HW struct
2092 * @blk: classification stage
2093 * @entry_id: unique ID to identify this flow entry
2095 * This function looks for the flow entry with the specified unique ID in all
2096 * flow profiles of the specified classification stage. If the entry is found,
2097 * and it returns the handle to the flow entry. Otherwise, it returns
2098 * ICE_FLOW_ENTRY_ID_INVAL.
2100 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2102 struct ice_flow_entry *found = NULL;
2103 struct ice_flow_prof *p;
2105 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2107 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2108 struct ice_flow_entry *e;
/* Lock ordering: per-profile entries_lock is taken while holding the
 * block-wide fl_profs_locks[blk].
 */
2110 ice_acquire_lock(&p->entries_lock);
2111 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2112 if (e->id == entry_id) {
2116 ice_release_lock(&p->entries_lock);
2122 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Convert the found entry pointer to an opaque handle for the caller */
2124 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2128 * ice_flow_acl_check_actions - Checks the acl rule's actions
2129 * @hw: pointer to the hardware structure
2130 * @acts: array of actions to be performed on a match
2131 * @acts_cnt: number of actions
2132 * @cnt_alloc: indicates if a ACL counter has been allocated.
2134 static enum ice_status
2135 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2136 u8 acts_cnt, bool *cnt_alloc)
/* Bitmap used to detect duplicate action types; indexed by action type */
2138 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2141 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2144 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2145 return ICE_ERR_OUT_OF_RANGE;
/* First pass: restrict to the supported action types and reject any
 * action type that appears more than once.
 */
2147 for (i = 0; i < acts_cnt; i++) {
2148 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2149 acts[i].type != ICE_FLOW_ACT_DROP &&
2150 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2151 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2154 /* If the caller want to add two actions of the same type, then
2155 * it is considered invalid configuration.
2157 if (ice_test_and_set_bit(acts[i].type, dup_check))
2158 return ICE_ERR_PARAM;
2161 /* Checks if ACL counters are needed. */
2162 for (i = 0; i < acts_cnt; i++) {
2163 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2164 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2165 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2166 struct ice_acl_cntrs cntrs;
2167 enum ice_status status;
2170 cntrs.bank = 0; /* Only bank0 for the moment */
/* PKT_BYTES needs a dual (packet+byte) counter; others single */
2172 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2173 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2175 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2177 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2180 /* Counter index within the bank */
2181 acts[i].data.acl_act.value =
2182 CPU_TO_LE16(cntrs.first_cntr);
2191 * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
2192 * @fld: number of the given field
2193 * @info: info about field
2194 * @range_buf: range checker configuration buffer
2195 * @data: pointer to a data buffer containing flow entry's match values/masks
2196 * @range: Input/output param indicating which range checkers are being used
2199 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2200 struct ice_aqc_acl_profile_ranges *range_buf,
2201 u8 *data, u8 *range)
2205 /* If not specified, default mask is all bits in field */
2206 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2207 BIT(ice_flds_info[fld].size) - 1 :
2208 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2210 /* If the mask is 0, then we don't need to worry about this input
2211 * range checker value.
/* Low/high bounds come from the entry buffer at the "val"/"last"
 * offsets, shifted by the extraction displacement to align with HW.
 */
2215 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2217 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2218 u8 range_idx = info->entry.val;
/* Range-checker boundaries are programmed big-endian */
2220 range_buf->checker_cfg[range_idx].low_boundary =
2221 CPU_TO_BE16(new_low);
2222 range_buf->checker_cfg[range_idx].high_boundary =
2223 CPU_TO_BE16(new_high);
2224 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2226 /* Indicate which range checker is being used */
2227 *range |= BIT(range_idx);
2232 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
2233 * @fld: number of the given field
2234 * @info: info about the field
2235 * @buf: buffer containing the entry
2236 * @dontcare: buffer containing don't care mask for entry
2237 * @data: pointer to a data buffer containing flow entry's match values/masks
2240 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2241 u8 *dontcare, u8 *data)
2243 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2244 bool use_mask = false;
2247 src = info->src.val;
2248 mask = info->src.mask;
/* Destination index is relative to the ACL byte-selection window */
2249 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* Sub-byte displacement of the extracted field within its first byte */
2250 disp = info->xtrct.disp % BITS_PER_BYTE;
2252 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying displaced (shifted-out) bits
 * from each byte into the next via tmp_s/tmp_m.
 */
2255 for (k = 0; k < info->entry.last; k++, dst++) {
2256 /* Add overflow bits from previous byte */
2257 buf[dst] = (tmp_s & 0xff00) >> 8;
2259 /* If mask is not valid, tmp_m is always zero, so just setting
2260 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2261 * overflow bits of mask from prev byte
2263 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2265 /* If there is displacement, last byte will only contain
2266 * displaced data, but there is no more data to read from user
2267 * buffer, so skip so as not to potentially read beyond end of
2270 if (!disp || k < info->entry.last - 1) {
2271 /* Store shifted data to use in next byte */
2272 tmp_s = data[src++] << disp;
2274 /* Add current (shifted) byte */
2275 buf[dst] |= tmp_s & 0xff;
2277 /* Handle mask if valid */
/* don't-care = inverted mask bits, shifted to match the data */
2279 tmp_m = (~data[mask++] & 0xff) << disp;
2280 dontcare[dst] |= tmp_m & 0xff;
2285 /* Fill in don't care bits at beginning of field */
2287 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2288 for (k = 0; k < disp; k++)
2289 dontcare[dst] |= BIT(k);
2292 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2294 /* Fill in don't care bits at end of field */
2296 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2297 info->entry.last - 1;
2298 for (k = end_disp; k < BITS_PER_BYTE; k++)
2299 dontcare[dst] |= BIT(k);
2304 * ice_flow_acl_frmt_entry - Format acl entry
2305 * @hw: pointer to the hardware structure
2306 * @prof: pointer to flow profile
2307 * @e: pointer to the flow entry
2308 * @data: pointer to a data buffer containing flow entry's match values/masks
2309 * @acts: array of actions to be performed on a match
2310 * @acts_cnt: number of actions
2312 * Formats the key (and key_inverse) to be matched from the data passed in,
2313 * along with data from the flow profile. This key/key_inverse pair makes up
2314 * the 'entry' for an acl flow entry.
2316 static enum ice_status
2317 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2318 struct ice_flow_entry *e, u8 *data,
2319 struct ice_flow_action *acts, u8 acts_cnt)
2321 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2322 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2323 enum ice_status status;
2328 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2332 /* Format the result action */
2334 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2338 status = ICE_ERR_NO_MEMORY;
/* Entry keeps its own copy of the caller's action array */
2340 e->acts = (struct ice_flow_action *)
2341 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2342 ICE_NONDMA_TO_NONDMA);
2347 e->acts_cnt = acts_cnt;
2349 /* Format the matching data */
2350 buf_sz = prof->cfg.scen->width;
2351 buf = (u8 *)ice_malloc(hw, buf_sz);
2355 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2359 /* 'key' buffer will store both key and key_inverse, so must be twice
2362 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2366 range_buf = (struct ice_aqc_acl_profile_ranges *)
2367 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2371 /* Set don't care mask to all 1's to start, will zero out used bytes */
2372 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Walk every matched field of every segment and format it into the
 * key buffer — range-checked fields go to the range-checker config,
 * everything else is formatted inline.
 */
2374 for (i = 0; i < prof->segs_cnt; i++) {
2375 struct ice_flow_seg_info *seg = &prof->segs[i];
2376 u64 match = seg->match;
2379 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2380 struct ice_flow_fld_info *info;
2381 const u64 bit = BIT_ULL(j);
2386 info = &seg->fields[j];
2388 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2389 ice_flow_acl_frmt_entry_range(j, info,
2393 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) fields are copied verbatim into the key */
2399 for (j = 0; j < seg->raws_cnt; j++) {
2400 struct ice_flow_fld_info *info = &seg->raws[j].info;
2401 u16 dst, src, mask, k;
2402 bool use_mask = false;
2404 src = info->src.val;
2405 dst = info->entry.val -
2406 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2407 mask = info->src.mask;
2409 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2412 for (k = 0; k < info->entry.last; k++, dst++) {
2413 buf[dst] = data[src++];
2415 dontcare[dst] = ~data[mask++];
/* Stamp the HW profile ID into the key so the entry only matches
 * packets classified into this profile.
 */
2422 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2423 dontcare[prof->cfg.scen->pid_idx] = 0;
2425 /* Format the buffer for direction flags */
2426 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2428 if (prof->dir == ICE_FLOW_RX)
2429 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2432 buf[prof->cfg.scen->rng_chk_idx] = range;
2433 /* Mark any unused range checkers as don't care */
2434 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2435 e->range_buf = range_buf;
2437 ice_free(hw, range_buf);
/* Convert value/don't-care into the key + key_inverse pair HW expects */
2440 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2446 e->entry_sz = buf_sz * 2;
2453 ice_free(hw, dontcare);
/* Error unwind: release range buffer, action copy, and any allocated
 * ACL counters when formatting failed.
 */
2458 if (status && range_buf) {
2459 ice_free(hw, range_buf);
2460 e->range_buf = NULL;
2463 if (status && e->acts) {
2464 ice_free(hw, e->acts);
2469 if (status && cnt_alloc)
2470 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2476 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2477 * the compared data.
2478 * @prof: pointer to flow profile
2479 * @e: pointer to the comparing flow entry
2480 * @do_chg_action: decide if we want to change the ACL action
2481 * @do_add_entry: decide if we want to add the new ACL entry
2482 * @do_rem_entry: decide if we want to remove the current ACL entry
2484 * Find an ACL scenario entry that matches the compared data. In the same time,
2485 * this function also figure out:
2486 * a/ If we want to change the ACL action
2487 * b/ If we want to add the new ACL entry
2488 * c/ If we want to remove the current ACL entry
2490 static struct ice_flow_entry *
2491 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2492 struct ice_flow_entry *e, bool *do_chg_action,
2493 bool *do_add_entry, bool *do_rem_entry)
2495 struct ice_flow_entry *p, *return_entry = NULL;
2499 * a/ There exists an entry with same matching data, but different
2500 * priority, then we remove this existing ACL entry. Then, we
2501 * will add the new entry to the ACL scenario.
2502 * b/ There exists an entry with same matching data, priority, and
2503 * result action, then we do nothing
2504 * c/ There exists an entry with same matching data, priority, but
2505 * different, action, then do only change the action's entry.
2506 * d/ Else, we add this new entry to the ACL scenario.
/* Default decision: no existing match found => plain add */
2508 *do_chg_action = false;
2509 *do_add_entry = true;
2510 *do_rem_entry = false;
2511 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2512 if (memcmp(p->entry, e->entry, p->entry_sz))
2515 /* From this point, we have the same matching_data. */
2516 *do_add_entry = false;
2519 if (p->priority != e->priority) {
2520 /* matching data && !priority */
2521 *do_add_entry = true;
2522 *do_rem_entry = true;
2526 /* From this point, we will have matching_data && priority */
2527 if (p->acts_cnt != e->acts_cnt)
2528 *do_chg_action = true;
/* Compare every action of the existing entry against every action of
 * the new one; any unmatched action forces an action update.
 */
2529 for (i = 0; i < p->acts_cnt; i++) {
2530 bool found_not_match = false;
2532 for (j = 0; j < e->acts_cnt; j++)
2533 if (memcmp(&p->acts[i], &e->acts[j],
2534 sizeof(struct ice_flow_action))) {
2535 found_not_match = true;
2539 if (found_not_match) {
2540 *do_chg_action = true;
2545 /* (do_chg_action = true) means :
2546 * matching_data && priority && !result_action
2547 * (do_chg_action = false) means :
2548 * matching_data && priority && result_action
2553 return return_entry;
2557 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
/* Maps the generic flow priority enum onto the ACL-specific priority
 * enum; unknown values fall back to ICE_NORMAL.
 */
2560 static enum ice_acl_entry_prior
2561 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2563 enum ice_acl_entry_prior acl_prior;
2566 case ICE_FLOW_PRIO_LOW:
2567 acl_prior = ICE_LOW;
2569 case ICE_FLOW_PRIO_NORMAL:
2570 acl_prior = ICE_NORMAL;
2572 case ICE_FLOW_PRIO_HIGH:
2573 acl_prior = ICE_HIGH;
2576 acl_prior = ICE_NORMAL;
2584 * ice_flow_acl_union_rng_chk - Perform union operation between two
2585 * range-range checker buffers
2586 * @dst_buf: pointer to destination range checker buffer
2587 * @src_buf: pointer to source range checker buffer
2589 * For this function, we do the union between dst_buf and src_buf
2590 * range checker buffer, and we will save the result back to dst_buf
2592 static enum ice_status
2593 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2594 struct ice_aqc_acl_profile_ranges *src_buf)
2598 if (!dst_buf || !src_buf)
2599 return ICE_ERR_BAD_PTR;
2601 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2602 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2603 bool will_populate = false;
2605 in_data = &src_buf->checker_cfg[i];
/* Find a destination slot that is either free (mask == 0) or already
 * holds an identical checker configuration.
 */
2610 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2611 cfg_data = &dst_buf->checker_cfg[j];
2613 if (!cfg_data->mask ||
2614 !memcmp(cfg_data, in_data,
2615 sizeof(struct ice_acl_rng_data))) {
2616 will_populate = true;
2621 if (will_populate) {
2622 ice_memcpy(cfg_data, in_data,
2623 sizeof(struct ice_acl_rng_data),
2624 ICE_NONDMA_TO_NONDMA);
2626 /* No available slot left to program range checker */
2627 return ICE_ERR_MAX_LIMIT;
2635 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2636 * @hw: pointer to the hardware structure
2637 * @prof: pointer to flow profile
2638 * @entry: double pointer to the flow entry
2640 * For this function, we will look at the current added entries in the
2641 * corresponding ACL scenario. Then, we will perform matching logic to
2642 * see if we want to add/modify/do nothing with this new entry.
2644 static enum ice_status
2645 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2646 struct ice_flow_entry **entry)
2648 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2649 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2650 struct ice_acl_act_entry *acts = NULL;
2651 struct ice_flow_entry *exist;
2652 enum ice_status status = ICE_SUCCESS;
2653 struct ice_flow_entry *e;
2656 if (!entry || !(*entry) || !prof)
2657 return ICE_ERR_BAD_PTR;
2661 do_chg_rng_chk = false;
2665 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2670 /* Query the current range-checker value in FW */
2671 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2675 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2676 sizeof(struct ice_aqc_acl_profile_ranges),
2677 ICE_NONDMA_TO_NONDMA);
2679 /* Generate the new range-checker value */
2680 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2684 /* Reconfigure the range check if the buffer is changed. */
2685 do_chg_rng_chk = false;
2686 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2687 sizeof(struct ice_aqc_acl_profile_ranges))) {
2688 status = ice_prog_acl_prof_ranges(hw, prof_id,
2689 &cfg_rng_buf, NULL);
2693 do_chg_rng_chk = true;
2697 /* Figure out if we want to (change the ACL action) and/or
2698 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2700 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2701 &do_add_entry, &do_rem_entry);
/* Remove-then-add path: the existing entry has the same key but a
 * different priority, so it must be torn down first.
 */
2704 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2709 /* Prepare the result action buffer */
2710 acts = (struct ice_acl_act_entry *)ice_calloc
2711 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2712 for (i = 0; i < e->acts_cnt; i++)
2713 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2714 sizeof(struct ice_acl_act_entry),
2715 ICE_NONDMA_TO_NONDMA);
2718 enum ice_acl_entry_prior prior;
/* 'entry' buffer holds key followed by key_inverse (each entry_sz/2) */
2722 keys = (u8 *)e->entry;
2723 inverts = keys + (e->entry_sz / 2);
2724 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2726 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2727 inverts, acts, e->acts_cnt,
2732 e->scen_entry_idx = entry_idx;
2733 LIST_ADD(&e->l_entry, &prof->entries);
2735 if (do_chg_action) {
2736 /* For the action memory info, update the SW's copy of
2737 * exist entry with e's action memory info
2739 ice_free(hw, exist->acts);
2740 exist->acts_cnt = e->acts_cnt;
2741 exist->acts = (struct ice_flow_action *)
2742 ice_calloc(hw, exist->acts_cnt,
2743 sizeof(struct ice_flow_action));
2746 status = ICE_ERR_NO_MEMORY;
2750 ice_memcpy(exist->acts, e->acts,
2751 sizeof(struct ice_flow_action) * e->acts_cnt,
2752 ICE_NONDMA_TO_NONDMA);
/* Reprogram only the action memory of the already-placed entry */
2754 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2756 exist->scen_entry_idx);
2761 if (do_chg_rng_chk) {
2762 /* In this case, we want to update the range checker
2763 * information of the exist entry
2765 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2771 /* As we don't add the new entry to our SW DB, deallocate its
2772 * memories, and return the exist entry to the caller
2774 ice_dealloc_flow_entry(hw, e);
2785 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2786 * @hw: pointer to the hardware structure
2787 * @prof: pointer to flow profile
2788 * @e: double pointer to the flow entry
/* Thin locked wrapper around ice_flow_acl_add_scen_entry_sync() */
2790 static enum ice_status
2791 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2792 struct ice_flow_entry **e)
2794 enum ice_status status;
2796 ice_acquire_lock(&prof->entries_lock);
2797 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2798 ice_release_lock(&prof->entries_lock);
2804 * ice_flow_add_entry - Add a flow entry
2805 * @hw: pointer to the HW struct
2806 * @blk: classification stage
2807 * @prof_id: ID of the profile to add a new flow entry to
2808 * @entry_id: unique ID to identify this flow entry
2809 * @vsi_handle: software VSI handle for the flow entry
2810 * @prio: priority of the flow entry
2811 * @data: pointer to a data buffer containing flow entry's match values/masks
2812 * @acts: arrays of actions to be performed on a match
2813 * @acts_cnt: number of actions
2814 * @entry_h: pointer to buffer that receives the new flow entry's handle
2817 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2818 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2819 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2822 struct ice_flow_entry *e = NULL;
2823 struct ice_flow_prof *prof;
2824 enum ice_status status = ICE_SUCCESS;
2826 /* ACL entries must indicate an action */
2827 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2828 return ICE_ERR_PARAM;
2830 /* No flow entry data is expected for RSS */
2831 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2832 return ICE_ERR_BAD_PTR;
2834 if (!ice_is_vsi_valid(hw, vsi_handle))
2835 return ICE_ERR_PARAM;
2837 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2839 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2841 status = ICE_ERR_DOES_NOT_EXIST;
2843 /* Allocate memory for the entry being added and associate
2844 * the VSI to the found flow profile
2846 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2848 status = ICE_ERR_NO_MEMORY;
2850 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2853 ice_release_lock(&hw->fl_profs_locks[blk]);
2858 e->vsi_handle = vsi_handle;
/* Block-specific entry formatting; only ACL is handled explicitly here,
 * other blocks fall through to ICE_ERR_NOT_IMPL below.
 */
2867 /* ACL will handle the entry management */
2868 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2873 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2881 status = ICE_ERR_NOT_IMPL;
2885 if (blk != ICE_BLK_ACL) {
2886 /* ACL will handle the entry management */
2887 ice_acquire_lock(&prof->entries_lock);
2888 LIST_ADD(&e->l_entry, &prof->entries);
2889 ice_release_lock(&prof->entries_lock);
2892 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2897 ice_free(hw, e->entry);
2905 * ice_flow_rem_entry - Remove a flow entry
2906 * @hw: pointer to the HW struct
2907 * @blk: classification stage
2908 * @entry_h: handle to the flow entry to be removed
2910 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2913 struct ice_flow_entry *entry;
2914 struct ice_flow_prof *prof;
2915 enum ice_status status = ICE_SUCCESS;
2917 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2918 return ICE_ERR_PARAM;
/* Convert the opaque handle back into an entry pointer */
2920 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2922 /* Retain the pointer to the flow profile as the entry will be freed */
2926 ice_acquire_lock(&prof->entries_lock);
2927 status = ice_flow_rem_entry_sync(hw, blk, entry);
2928 ice_release_lock(&prof->entries_lock);
2935 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2936 * @seg: packet segment the field being set belongs to
2937 * @fld: field to be set
2938 * @field_type: type of the field
2939 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2940 * entry's input buffer
2941 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2943 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2944 * entry's input buffer
2946 * This helper function stores information of a field being matched, including
2947 * the type of the field and the locations of the value to match, the mask, and
2948 * and the upper-bound value in the start of the input buffer for a flow entry.
2949 * This function should only be used for fixed-size data structures.
2951 * This function also opportunistically determines the protocol headers to be
2952 * present based on the fields being set. Some fields cannot be used alone to
2953 * determine the protocol headers present. Sometimes, fields for particular
2954 * protocol headers are not matched. In those cases, the protocol headers
2955 * must be explicitly set.
2958 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2959 enum ice_flow_fld_match_type field_type, u16 val_loc,
2960 u16 mask_loc, u16 last_loc)
2962 u64 bit = BIT_ULL(fld);
2965 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record where the value/mask/upper-bound live in the entry buffer */
2968 seg->fields[fld].type = field_type;
2969 seg->fields[fld].src.val = val_loc;
2970 seg->fields[fld].src.mask = mask_loc;
2971 seg->fields[fld].src.last = last_loc;
/* Implicitly flag the protocol header this field belongs to */
2973 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2977 * ice_flow_set_fld - specifies locations of field from entry's input buffer
2978 * @seg: packet segment the field being set belongs to
2979 * @fld: field to be set
2980 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2981 * entry's input buffer
2982 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2984 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2985 * entry's input buffer
2986 * @range: indicate if field being matched is to be in a range
2988 * This function specifies the locations, in the form of byte offsets from the
2989 * start of the input buffer for a flow entry, from where the value to match,
2990 * the mask value, and upper value can be extracted. These locations are then
2991 * stored in the flow profile. When adding a flow entry associated with the
2992 * flow profile, these locations will be used to quickly extract the values and
2993 * create the content of a match entry. This function should only be used for
2994 * fixed-size data structures.
2997 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2998 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Convenience wrapper: selects RANGE vs REG match type, then delegates */
3000 enum ice_flow_fld_match_type t = range ?
3001 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3003 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3007 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3008 * @seg: packet segment the field being set belongs to
3009 * @fld: field to be set
3010 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3011 * entry's input buffer
3012 * @pref_loc: location of prefix value from entry's input buffer
3013 * @pref_sz: size of the location holding the prefix value
3015 * This function specifies the locations, in the form of byte offsets from the
3016 * start of the input buffer for a flow entry, from where the value to match
3017 * and the IPv4 prefix value can be extracted. These locations are then stored
3018 * in the flow profile. When adding flow entries to the associated flow profile,
3019 * these locations can be used to quickly extract the values to create the
3020 * content of a match entry. This function should only be used for fixed-size
3024 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3025 u16 val_loc, u16 pref_loc, u8 pref_sz)
3027 /* For this type of field, the "mask" location is for the prefix value's
3028 * location and the "last" location is for the size of the location of
3031 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3032 pref_loc, (u16)pref_sz);
3036 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3037 * @seg: packet segment the field being set belongs to
3038 * @off: offset of the raw field from the beginning of the segment in bytes
3039 * @len: length of the raw pattern to be matched
3040 * @val_loc: location of the value to match from entry's input buffer
3041 * @mask_loc: location of mask value from entry's input buffer
3043 * This function specifies the offset of the raw field to be match from the
3044 * beginning of the specified packet segment, and the locations, in the form of
3045 * byte offsets from the start of the input buffer for a flow entry, from where
3046 * the value to match and the mask value to be extracted. These locations are
3047 * then stored in the flow profile. When adding flow entries to the associated
3048 * flow profile, these locations can be used to quickly extract the values to
3049 * create the content of a match entry. This function should only be used for
3050 * fixed-size data structures.
3053 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3054 u16 val_loc, u16 mask_loc)
/* Only record the raw field if there is room in the segment's table */
3056 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3057 seg->raws[seg->raws_cnt].off = off;
3058 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3059 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3060 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3061 /* The "last" field is used to store the length of the field */
3062 seg->raws[seg->raws_cnt].info.src.last = len;
3065 /* Overflows of "raws" will be handled as an error condition later in
3066 * the flow when this information is processed.
/* Protocol-header bitmasks grouping the segment headers that RSS
 * configuration accepts, split by OSI layer (L2/L3/L4) plus their union.
 */
3071 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3072 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3074 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3075 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3077 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3078 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3079 ICE_FLOW_SEG_HDR_SCTP)
3081 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3082 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3083 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3084 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3087 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3088 * @segs: pointer to the flow field segment(s)
3089 * @hash_fields: fields to be hashed on for the segment(s)
3090 * @flow_hdr: protocol header fields within a packet segment
3092 * Helper function to extract fields from hash bitmap and use flow
3093 * header value to set flow field segment for further use in flow
3094 * profile entry or removal.
3096 static enum ice_status
3097 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3100 u64 val = hash_fields;
/* For each field set in the hash bitmap, mark it for matching — RSS
 * does not use value/mask/last buffers, so all offsets are INVAL.
 */
3103 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3104 u64 bit = BIT_ULL(i);
3107 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3108 ICE_FLOW_FLD_OFF_INVAL,
3109 ICE_FLOW_FLD_OFF_INVAL,
3110 ICE_FLOW_FLD_OFF_INVAL, false);
3114 ICE_FLOW_SET_HDRS(segs, flow_hdr);
/* Reject any header bit outside the RSS-supported set */
3116 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3117 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3118 return ICE_ERR_PARAM;
/* At most one L3 and one L4 header may be selected (power-of-2 check) */
3120 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3121 if (val && !ice_is_pow2(val))
3124 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3125 if (val && !ice_is_pow2(val))
3132 * ice_rem_vsi_rss_list - remove VSI from RSS list
3133 * @hw: pointer to the hardware structure
3134 * @vsi_handle: software VSI handle
3136 * Remove the VSI from all RSS configurations in the list.
3138 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3140 struct ice_rss_cfg *r, *tmp;
3142 if (LIST_EMPTY(&hw->rss_list_head))
3145 ice_acquire_lock(&hw->rss_locks);
3146 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3147 ice_rss_cfg, l_entry) {
/* Drop the whole cfg node once no VSI references it any more */
3148 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3149 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3150 LIST_DEL(&r->l_entry);
3154 ice_release_lock(&hw->rss_locks);
3158 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3159 * @hw: pointer to the hardware structure
3160 * @vsi_handle: software VSI handle
3162 * This function will iterate through all flow profiles and disassociate
3163 * the VSI from that profile. If the flow profile has no VSIs it will
3166 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3168 const enum ice_block blk = ICE_BLK_RSS;
3169 struct ice_flow_prof *p, *t;
3170 enum ice_status status = ICE_SUCCESS;
3172 if (!ice_is_vsi_valid(hw, vsi_handle))
3173 return ICE_ERR_PARAM;
3175 if (LIST_EMPTY(&hw->fl_profs[blk]))
3178 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3179 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3181 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3182 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Once the last VSI is gone, remove the profile itself */
3186 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3187 status = ice_flow_rem_prof_sync(hw, blk, p);
3193 ice_release_lock(&hw->fl_profs_locks[blk]);
3199 * ice_rem_rss_list - remove RSS configuration from list
3200 * @hw: pointer to the hardware structure
3201 * @vsi_handle: software VSI handle
3202 * @prof: pointer to flow profile
3204 * Assumption: lock has already been acquired for RSS list
3207 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3209 struct ice_rss_cfg *r, *tmp;
3211 /* Search for RSS hash fields associated to the VSI that match the
3212 * hash configurations associated to the flow profile. If found
3213 * remove from the RSS entry list of the VSI context and delete entry.
3215 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3216 ice_rss_cfg, l_entry) {
/* Match against the profile's last (innermost) segment */
3217 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3218 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3219 ice_clear_bit(vsi_handle, r->vsis);
3220 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3221 LIST_DEL(&r->l_entry);
3230 * ice_add_rss_list - add RSS configuration to list
3231 * @hw: pointer to the hardware structure
3232 * @vsi_handle: software VSI handle
3233 * @prof: pointer to flow profile
3235 * Assumption: lock has already been acquired for RSS list
3237 static enum ice_status
3238 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3240 struct ice_rss_cfg *r, *rss_cfg;
3242 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3243 ice_rss_cfg, l_entry)
3244 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3245 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3246 ice_set_bit(vsi_handle, r->vsis);
3250 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3252 return ICE_ERR_NO_MEMORY;
3254 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3255 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3256 rss_cfg->symm = prof->cfg.symm;
3257 ice_set_bit(vsi_handle, rss_cfg->vsis);
3259 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Number of segments for non-tunneled and tunneled packets */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3285 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3287 u32 s = ((src % 4) << 3); /* byte shift */
3288 u32 v = dst | 0x80; /* value to program */
3289 u8 i = src / 4; /* register index */
3292 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3293 reg = (reg & ~(0xff << s)) | (v << s);
3294 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3298 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3301 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3304 for (i = 0; i < len; i++) {
3305 ice_rss_config_xor_word(hw, prof_id,
3306 /* Yes, field vector in GLQF_HSYMM and
3307 * GLQF_HINSET is inversed!
3309 fv_last_word - (src + i),
3310 fv_last_word - (dst + i));
3311 ice_rss_config_xor_word(hw, prof_id,
3312 fv_last_word - (dst + i),
3313 fv_last_word - (src + i));
3318 ice_rss_update_symm(struct ice_hw *hw,
3319 struct ice_flow_prof *prof)
3321 struct ice_prof_map *map;
3324 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3325 prof_id = map->prof_id;
3327 /* clear to default */
3328 for (m = 0; m < 6; m++)
3329 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3330 if (prof->cfg.symm) {
3331 struct ice_flow_seg_info *seg =
3332 &prof->segs[prof->segs_cnt - 1];
3334 struct ice_flow_seg_xtrct *ipv4_src =
3335 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3336 struct ice_flow_seg_xtrct *ipv4_dst =
3337 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3338 struct ice_flow_seg_xtrct *ipv6_src =
3339 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3340 struct ice_flow_seg_xtrct *ipv6_dst =
3341 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3343 struct ice_flow_seg_xtrct *tcp_src =
3344 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3345 struct ice_flow_seg_xtrct *tcp_dst =
3346 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3348 struct ice_flow_seg_xtrct *udp_src =
3349 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3350 struct ice_flow_seg_xtrct *udp_dst =
3351 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3353 struct ice_flow_seg_xtrct *sctp_src =
3354 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3355 struct ice_flow_seg_xtrct *sctp_dst =
3356 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3359 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3360 ice_rss_config_xor(hw, prof_id,
3361 ipv4_src->idx, ipv4_dst->idx, 2);
3364 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3365 ice_rss_config_xor(hw, prof_id,
3366 ipv6_src->idx, ipv6_dst->idx, 8);
3369 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3370 ice_rss_config_xor(hw, prof_id,
3371 tcp_src->idx, tcp_dst->idx, 1);
3374 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3375 ice_rss_config_xor(hw, prof_id,
3376 udp_src->idx, udp_dst->idx, 1);
3379 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3380 ice_rss_config_xor(hw, prof_id,
3381 sctp_src->idx, sctp_dst->idx, 1);
3386 * ice_add_rss_cfg_sync - add an RSS configuration
3387 * @hw: pointer to the hardware structure
3388 * @vsi_handle: software VSI handle
3389 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3390 * @addl_hdrs: protocol header fields
3391 * @segs_cnt: packet segment count
3392 * @symm: symmetric hash enable/disable
3394 * Assumption: lock has already been acquired for RSS list
3396 static enum ice_status
3397 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3398 u32 addl_hdrs, u8 segs_cnt, bool symm)
3400 const enum ice_block blk = ICE_BLK_RSS;
3401 struct ice_flow_prof *prof = NULL;
3402 struct ice_flow_seg_info *segs;
3403 enum ice_status status;
3405 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3406 return ICE_ERR_PARAM;
3408 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3411 return ICE_ERR_NO_MEMORY;
3413 /* Construct the packet segment info from the hashed fields */
3414 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3419 /* Search for a flow profile that has matching headers, hash fields
3420 * and has the input VSI associated to it. If found, no further
3421 * operations required and exit.
3423 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3425 ICE_FLOW_FIND_PROF_CHK_FLDS |
3426 ICE_FLOW_FIND_PROF_CHK_VSI);
3428 if (prof->cfg.symm == symm)
3430 prof->cfg.symm = symm;
3434 /* Check if a flow profile exists with the same protocol headers and
3435 * associated with the input VSI. If so disasscociate the VSI from
3436 * this profile. The VSI will be added to a new profile created with
3437 * the protocol header and new hash field configuration.
3439 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3440 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3442 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3444 ice_rem_rss_list(hw, vsi_handle, prof);
3448 /* Remove profile if it has no VSIs associated */
3449 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3450 status = ice_flow_rem_prof(hw, blk, prof->id);
3456 /* Search for a profile that has same match fields only. If this
3457 * exists then associate the VSI to this profile.
3459 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3461 ICE_FLOW_FIND_PROF_CHK_FLDS);
3463 if (prof->cfg.symm == symm) {
3464 status = ice_flow_assoc_prof(hw, blk, prof,
3467 status = ice_add_rss_list(hw, vsi_handle,
3470 /* if a profile exist but with different symmetric
3471 * requirement, just return error.
3473 status = ICE_ERR_NOT_SUPPORTED;
3478 /* Create a new flow profile with generated profile and packet
3479 * segment information.
3481 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3482 ICE_FLOW_GEN_PROFID(hashed_flds,
3483 segs[segs_cnt - 1].hdrs,
3485 segs, segs_cnt, NULL, 0, &prof);
3489 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3490 /* If association to a new flow profile failed then this profile can
3494 ice_flow_rem_prof(hw, blk, prof->id);
3498 status = ice_add_rss_list(hw, vsi_handle, prof);
3500 prof->cfg.symm = symm;
3503 ice_rss_update_symm(hw, prof);
3511 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3512 * @hw: pointer to the hardware structure
3513 * @vsi_handle: software VSI handle
3514 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3515 * @addl_hdrs: protocol header fields
3516 * @symm: symmetric hash enable/disable
3518 * This function will generate a flow profile based on fields associated with
3519 * the input fields to hash on, the flow type and use the VSI number to add
3520 * a flow entry to the profile.
3523 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3524 u32 addl_hdrs, bool symm)
3526 enum ice_status status;
3528 if (hashed_flds == ICE_HASH_INVALID ||
3529 !ice_is_vsi_valid(hw, vsi_handle))
3530 return ICE_ERR_PARAM;
3532 ice_acquire_lock(&hw->rss_locks);
3533 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3534 ICE_RSS_OUTER_HEADERS, symm);
3536 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3537 addl_hdrs, ICE_RSS_INNER_HEADERS,
3539 ice_release_lock(&hw->rss_locks);
3545 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3546 * @hw: pointer to the hardware structure
3547 * @vsi_handle: software VSI handle
3548 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3549 * @addl_hdrs: Protocol header fields within a packet segment
3550 * @segs_cnt: packet segment count
3552 * Assumption: lock has already been acquired for RSS list
3554 static enum ice_status
3555 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3556 u32 addl_hdrs, u8 segs_cnt)
3558 const enum ice_block blk = ICE_BLK_RSS;
3559 struct ice_flow_seg_info *segs;
3560 struct ice_flow_prof *prof;
3561 enum ice_status status;
3563 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3566 return ICE_ERR_NO_MEMORY;
3568 /* Construct the packet segment info from the hashed fields */
3569 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3574 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3576 ICE_FLOW_FIND_PROF_CHK_FLDS);
3578 status = ICE_ERR_DOES_NOT_EXIST;
3582 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3586 /* Remove RSS configuration from VSI context before deleting
3589 ice_rem_rss_list(hw, vsi_handle, prof);
3591 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3592 status = ice_flow_rem_prof(hw, blk, prof->id);
3600 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3601 * @hw: pointer to the hardware structure
3602 * @vsi_handle: software VSI handle
3603 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3604 * @addl_hdrs: Protocol header fields within a packet segment
3606 * This function will lookup the flow profile based on the input
3607 * hash field bitmap, iterate through the profile entry list of
3608 * that profile and find entry associated with input VSI to be
3609 * removed. Calls are made to underlying flow apis which will in
3610 * turn build or update buffers for RSS XLT1 section.
3613 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3616 enum ice_status status;
3618 if (hashed_flds == ICE_HASH_INVALID ||
3619 !ice_is_vsi_valid(hw, vsi_handle))
3620 return ICE_ERR_PARAM;
3622 ice_acquire_lock(&hw->rss_locks);
3623 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3624 ICE_RSS_OUTER_HEADERS);
3626 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3627 addl_hdrs, ICE_RSS_INNER_HEADERS);
3628 ice_release_lock(&hw->rss_locks);
3634 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3635 * @hw: pointer to the hardware structure
3636 * @vsi_handle: software VSI handle
3638 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3640 enum ice_status status = ICE_SUCCESS;
3641 struct ice_rss_cfg *r;
3643 if (!ice_is_vsi_valid(hw, vsi_handle))
3644 return ICE_ERR_PARAM;
3646 ice_acquire_lock(&hw->rss_locks);
3647 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3648 ice_rss_cfg, l_entry) {
3649 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3650 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3653 ICE_RSS_OUTER_HEADERS,
3657 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3660 ICE_RSS_INNER_HEADERS,
3666 ice_release_lock(&hw->rss_locks);
3672 * ice_get_rss_cfg - returns hashed fields for the given header types
3673 * @hw: pointer to the hardware structure
3674 * @vsi_handle: software VSI handle
3675 * @hdrs: protocol header type
3677 * This function will return the match fields of the first instance of flow
3678 * profile having the given header types and containing input VSI
3680 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3682 struct ice_rss_cfg *r, *rss_cfg = NULL;
3684 /* verify if the protocol header is non zero and VSI is valid */
3685 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3686 return ICE_HASH_INVALID;
3688 ice_acquire_lock(&hw->rss_locks);
3689 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3690 ice_rss_cfg, l_entry)
3691 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3692 r->packet_hdr == hdrs) {
3696 ice_release_lock(&hw->rss_locks);
3698 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;