/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */
5 #include "ice_common.h"
/* Size of known protocol header fields, in bytes.
 * (The ICE_FLOW_FLD_INFO* macros convert these to bits via BITS_PER_BYTE.)
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
31 /* Describe properties of a protocol header field */
32 struct ice_flow_field_info {
33 enum ice_flow_seg_hdr hdr;
34 s16 off; /* Offset from start of a protocol header, in bits */
35 u16 size; /* Size of fields in bits */
36 u16 mask; /* 16-bit mask for field */
39 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
41 .off = (_offset_bytes) * BITS_PER_BYTE, \
42 .size = (_size_bytes) * BITS_PER_BYTE, \
46 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
48 .off = (_offset_bytes) * BITS_PER_BYTE, \
49 .size = (_size_bytes) * BITS_PER_BYTE, \
53 /* Table containing properties of supported protocol header fields */
55 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
57 /* ICE_FLOW_FIELD_IDX_ETH_DA */
58 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
59 /* ICE_FLOW_FIELD_IDX_ETH_SA */
60 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
61 /* ICE_FLOW_FIELD_IDX_S_VLAN */
62 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
63 /* ICE_FLOW_FIELD_IDX_C_VLAN */
64 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
65 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
66 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
68 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
69 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
71 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
72 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
74 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
75 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
76 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
77 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
78 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
80 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
81 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
82 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
83 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
84 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
86 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
87 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
88 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
89 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
90 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
91 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
92 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
93 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
97 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
99 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
101 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
103 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
105 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
107 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
110 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
112 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
113 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
114 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
115 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
116 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
118 /* ICE_FLOW_FIELD_IDX_ARP_OP */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
121 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
122 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
123 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
124 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
126 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
129 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
130 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
131 ICE_FLOW_FLD_SZ_GTP_TEID),
132 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
134 ICE_FLOW_FLD_SZ_GTP_TEID),
135 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
137 ICE_FLOW_FLD_SZ_GTP_TEID),
138 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
139 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
140 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
141 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
143 ICE_FLOW_FLD_SZ_GTP_TEID),
144 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
146 ICE_FLOW_FLD_SZ_GTP_TEID),
148 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
149 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
150 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
152 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
154 ICE_FLOW_FLD_SZ_PFCP_SEID),
156 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
158 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
160 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
162 ICE_FLOW_FLD_SZ_ESP_SPI),
164 /* ICE_FLOW_FIELD_IDX_AH_SPI */
165 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
166 ICE_FLOW_FLD_SZ_AH_SPI),
168 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
169 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
170 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
173 /* Bitmaps indicating relevant packet types for a particular protocol header
175 * Packet types for packets with an Outer/First/Single MAC header
177 static const u32 ice_ptypes_mac_ofos[] = {
178 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
179 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
180 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
181 0x00000000, 0x00000000, 0x00000000, 0x00000000,
182 0x00000000, 0x00000000, 0x00000000, 0x00000000,
183 0x00000000, 0x00000000, 0x00000000, 0x00000000,
184 0x00000000, 0x00000000, 0x00000000, 0x00000000,
185 0x00000000, 0x00000000, 0x00000000, 0x00000000,
188 /* Packet types for packets with an Innermost/Last MAC VLAN header */
189 static const u32 ice_ptypes_macvlan_il[] = {
190 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
191 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
192 0x00000000, 0x00000000, 0x00000000, 0x00000000,
193 0x00000000, 0x00000000, 0x00000000, 0x00000000,
194 0x00000000, 0x00000000, 0x00000000, 0x00000000,
195 0x00000000, 0x00000000, 0x00000000, 0x00000000,
196 0x00000000, 0x00000000, 0x00000000, 0x00000000,
197 0x00000000, 0x00000000, 0x00000000, 0x00000000,
200 /* Packet types for packets with an Outer/First/Single IPv4 header */
201 static const u32 ice_ptypes_ipv4_ofos[] = {
202 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
203 0x00000000, 0x00000155, 0x00000000, 0x00000000,
204 0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 0x00000000, 0x00000000, 0x00000000, 0x00000000,
208 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 0x00000000, 0x00000000, 0x00000000, 0x00000000,
212 /* Packet types for packets with an Innermost/Last IPv4 header */
213 static const u32 ice_ptypes_ipv4_il[] = {
214 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
215 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
220 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 0x00000000, 0x00000000, 0x00000000, 0x00000000,
224 /* Packet types for packets with an Outer/First/Single IPv6 header */
225 static const u32 ice_ptypes_ipv6_ofos[] = {
226 0x00000000, 0x00000000, 0x77000000, 0x10002000,
227 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
228 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 /* Packet types for packets with an Innermost/Last IPv6 header */
237 static const u32 ice_ptypes_ipv6_il[] = {
238 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
239 0x00000770, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 /* Packet types for packets with an Outermost/First ARP header */
249 static const u32 ice_ptypes_arp_of[] = {
250 0x00000800, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
260 /* UDP Packet types for non-tunneled packets or tunneled
261 * packets with inner UDP.
263 static const u32 ice_ptypes_udp_il[] = {
264 0x81000000, 0x20204040, 0x04000010, 0x80810102,
265 0x00000040, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00410000, 0x90842000, 0x00000007,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 /* Packet types for packets with an Innermost/Last TCP header */
275 static const u32 ice_ptypes_tcp_il[] = {
276 0x04000000, 0x80810102, 0x10000040, 0x02040408,
277 0x00000102, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00820000, 0x21084000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 /* Packet types for packets with an Innermost/Last SCTP header */
287 static const u32 ice_ptypes_sctp_il[] = {
288 0x08000000, 0x01020204, 0x20000081, 0x04080810,
289 0x00000204, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x01040000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 /* Packet types for packets with an Outermost/First ICMP header */
299 static const u32 ice_ptypes_icmp_of[] = {
300 0x10000000, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 /* Packet types for packets with an Innermost/Last ICMP header */
311 static const u32 ice_ptypes_icmp_il[] = {
312 0x00000000, 0x02040408, 0x40000102, 0x08101020,
313 0x00000408, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x42108000, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 /* Packet types for packets with an Outermost/First GRE header */
323 static const u32 ice_ptypes_gre_of[] = {
324 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
325 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 /* Packet types for packets with an Innermost/Last MAC header */
335 static const u32 ice_ptypes_mac_il[] = {
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 /* Packet types for GTPC */
347 static const u32 ice_ptypes_gtpc[] = {
348 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000180, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 /* Packet types for GTPC with TEID */
359 static const u32 ice_ptypes_gtpc_tid[] = {
360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000060, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 0x00000000, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 /* Packet types for GTPU */
371 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
372 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
373 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
374 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
375 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
376 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
377 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
378 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
379 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
380 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
381 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
382 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
383 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
384 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
385 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
386 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
387 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
388 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
389 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
390 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
391 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
394 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
395 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
396 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
397 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
398 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
399 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
400 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
401 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
402 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
403 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
404 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
405 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
406 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
407 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
408 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
409 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
410 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
411 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
412 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
413 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
414 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
417 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
418 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
419 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
420 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
421 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
422 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
423 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
424 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
425 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
426 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
427 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
428 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
429 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
430 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
431 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
432 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
433 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
434 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
435 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
436 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
437 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
440 static const u32 ice_ptypes_gtpu[] = {
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 /* Packet types for pppoe */
452 static const u32 ice_ptypes_pppoe[] = {
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 0x00000000, 0x00000000, 0x00000000, 0x00000000,
459 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 /* Packet types for packets with PFCP NODE header */
464 static const u32 ice_ptypes_pfcp_node[] = {
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x80000000, 0x00000002,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 0x00000000, 0x00000000, 0x00000000, 0x00000000,
471 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 /* Packet types for packets with PFCP SESSION header */
476 static const u32 ice_ptypes_pfcp_session[] = {
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000005,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 /* Packet types for l2tpv3 */
488 static const u32 ice_ptypes_l2tpv3[] = {
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000300,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 /* Packet types for esp */
500 static const u32 ice_ptypes_esp[] = {
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000003, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
506 0x00000000, 0x00000000, 0x00000000, 0x00000000,
507 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 /* Packet types for ah */
512 static const u32 ice_ptypes_ah[] = {
513 0x00000000, 0x00000000, 0x00000000, 0x00000000,
514 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
515 0x00000000, 0x00000000, 0x00000000, 0x00000000,
516 0x00000000, 0x00000000, 0x00000000, 0x00000000,
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
518 0x00000000, 0x00000000, 0x00000000, 0x00000000,
519 0x00000000, 0x00000000, 0x00000000, 0x00000000,
520 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 /* Packet types for packets with NAT_T ESP header */
524 static const u32 ice_ptypes_nat_t_esp[] = {
525 0x00000000, 0x00000000, 0x00000000, 0x00000000,
526 0x00000000, 0x00000030, 0x00000000, 0x00000000,
527 0x00000000, 0x00000000, 0x00000000, 0x00000000,
528 0x00000000, 0x00000000, 0x00000000, 0x00000000,
529 0x00000000, 0x00000000, 0x00000000, 0x00000000,
530 0x00000000, 0x00000000, 0x00000000, 0x00000000,
531 0x00000000, 0x00000000, 0x00000000, 0x00000000,
532 0x00000000, 0x00000000, 0x00000000, 0x00000000,
535 /* Manage parameters and info. used during the creation of a flow profile */
536 struct ice_flow_prof_params {
538 u16 entry_length; /* # of bytes formatted entry will require */
540 struct ice_flow_prof *prof;
542 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
543 * This will give us the direction flags.
545 struct ice_fv_word es[ICE_MAX_FV_WORDS];
546 /* attributes can be used to add attributes to a particular PTYPE */
547 const struct ice_ptype_attributes *attr;
550 u16 mask[ICE_MAX_FV_WORDS];
551 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Protocol headers that are always treated as belonging to the inner
 * (last) segment for RSS purposes.
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* Per-layer groupings of segment header flags; at most one L3 and one L4
 * header may be selected per segment (see ice_flow_val_hdrs).
 */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	ICE_FLOW_SEG_HDR_SCTP)
571 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
572 * @segs: array of one or more packet segments that describe the flow
573 * @segs_cnt: number of packet segments provided
575 static enum ice_status
576 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
580 for (i = 0; i < segs_cnt; i++) {
581 /* Multiple L3 headers */
582 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
583 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
584 return ICE_ERR_PARAM;
586 /* Multiple L4 headers */
587 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
588 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
589 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
607 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
608 * @params: information about the flow to be processed
609 * @seg: index of packet segment whose header size is to be determined
611 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
616 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
617 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
620 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
621 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
622 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
623 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
624 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
625 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
626 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
627 /* A L3 header is required if L4 is specified */
631 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
632 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
633 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
634 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
635 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
636 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
637 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
638 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
644 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
645 * @params: information about the flow to be processed
647 * This function identifies the packet types associated with the protocol
648 * headers being present in packet segments of the specified flow profile.
650 static enum ice_status
651 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
653 struct ice_flow_prof *prof;
656 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
661 for (i = 0; i < params->prof->segs_cnt; i++) {
662 const ice_bitmap_t *src;
665 hdrs = prof->segs[i].hdrs;
667 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
668 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
669 (const ice_bitmap_t *)ice_ptypes_mac_il;
670 ice_and_bitmap(params->ptypes, params->ptypes, src,
674 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
675 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
676 ice_and_bitmap(params->ptypes, params->ptypes, src,
680 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
681 ice_and_bitmap(params->ptypes, params->ptypes,
682 (const ice_bitmap_t *)ice_ptypes_arp_of,
686 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
687 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
688 ice_and_bitmap(params->ptypes, params->ptypes, src,
692 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
693 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
694 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
695 ice_and_bitmap(params->ptypes, params->ptypes, src,
697 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
698 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
699 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
700 ice_and_bitmap(params->ptypes, params->ptypes, src,
704 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
705 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
706 (const ice_bitmap_t *)ice_ptypes_icmp_il;
707 ice_and_bitmap(params->ptypes, params->ptypes, src,
709 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
710 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
711 ice_and_bitmap(params->ptypes, params->ptypes, src,
713 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
714 ice_and_bitmap(params->ptypes, params->ptypes,
715 (const ice_bitmap_t *)ice_ptypes_tcp_il,
717 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
718 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
719 ice_and_bitmap(params->ptypes, params->ptypes, src,
721 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
723 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
724 ice_and_bitmap(params->ptypes, params->ptypes,
725 src, ICE_FLOW_PTYPE_MAX);
727 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
728 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
729 ice_and_bitmap(params->ptypes, params->ptypes,
730 src, ICE_FLOW_PTYPE_MAX);
731 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
732 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
733 ice_and_bitmap(params->ptypes, params->ptypes,
734 src, ICE_FLOW_PTYPE_MAX);
735 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
736 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
737 ice_and_bitmap(params->ptypes, params->ptypes,
738 src, ICE_FLOW_PTYPE_MAX);
740 /* Attributes for GTP packet with downlink */
741 params->attr = ice_attr_gtpu_down;
742 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
743 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
744 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
745 ice_and_bitmap(params->ptypes, params->ptypes,
746 src, ICE_FLOW_PTYPE_MAX);
748 /* Attributes for GTP packet with uplink */
749 params->attr = ice_attr_gtpu_up;
750 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
751 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
752 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
753 ice_and_bitmap(params->ptypes, params->ptypes,
754 src, ICE_FLOW_PTYPE_MAX);
756 /* Attributes for GTP packet with Extension Header */
757 params->attr = ice_attr_gtpu_eh;
758 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
759 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
760 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
761 ice_and_bitmap(params->ptypes, params->ptypes,
762 src, ICE_FLOW_PTYPE_MAX);
763 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
764 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
765 ice_and_bitmap(params->ptypes, params->ptypes,
766 src, ICE_FLOW_PTYPE_MAX);
767 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
768 src = (const ice_bitmap_t *)ice_ptypes_esp;
769 ice_and_bitmap(params->ptypes, params->ptypes,
770 src, ICE_FLOW_PTYPE_MAX);
771 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
772 src = (const ice_bitmap_t *)ice_ptypes_ah;
773 ice_and_bitmap(params->ptypes, params->ptypes,
774 src, ICE_FLOW_PTYPE_MAX);
775 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
776 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
777 ice_and_bitmap(params->ptypes, params->ptypes,
778 src, ICE_FLOW_PTYPE_MAX);
781 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
782 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
784 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
787 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
789 ice_and_bitmap(params->ptypes, params->ptypes,
790 src, ICE_FLOW_PTYPE_MAX);
792 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
793 ice_andnot_bitmap(params->ptypes, params->ptypes,
794 src, ICE_FLOW_PTYPE_MAX);
796 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
797 ice_andnot_bitmap(params->ptypes, params->ptypes,
798 src, ICE_FLOW_PTYPE_MAX);
806 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
807 * @hw: pointer to the HW struct
808 * @params: information about the flow to be processed
809 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
811 * This function will allocate an extraction sequence entries for a DWORD size
812 * chunk of the packet flags.
814 static enum ice_status
815 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
816 struct ice_flow_prof_params *params,
817 enum ice_flex_mdid_pkt_flags flags)
819 u8 fv_words = hw->blk[params->blk].es.fvw;
822 /* Make sure the number of extraction sequence entries required does not
823 * exceed the block's capacity.
825 if (params->es_cnt >= fv_words)
826 return ICE_ERR_MAX_LIMIT;
828 /* some blocks require a reversed field vector layout */
829 if (hw->blk[params->blk].es.reverse)
830 idx = fv_words - params->es_cnt - 1;
832 idx = params->es_cnt;
/* Metadata "protocol"; the MDID flags enum value doubles as the FV offset */
834 params->es[idx].prot_id = ICE_PROT_META_ID;
835 params->es[idx].off = flags;
842 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
843 * @hw: pointer to the HW struct
844 * @params: information about the flow to be processed
845 * @seg: packet segment index of the field to be extracted
846 * @fld: ID of field to be extracted
847 * @match: bitfield of all fields
849 * This function determines the protocol ID, offset, and size of the given
850 * field. It then allocates one or more extraction sequence entries for the
851 * given field, and fill the entries with protocol ID and offset information.
853 static enum ice_status
854 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
855 u8 seg, enum ice_flow_field fld, u64 match)
857 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
858 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
859 u8 fv_words = hw->blk[params->blk].es.fvw;
860 struct ice_flow_fld_info *flds;
861 u16 cnt, ese_bits, i;
867 flds = params->prof->segs[seg].fields;
/* Map the flow field ID to a HW protocol ID. Outer headers (seg == 0)
 * use the "OF"/"OL" (outer-first/outer-last) protocol IDs, inner
 * headers the "IL" ones.
 */
870 case ICE_FLOW_FIELD_IDX_ETH_DA:
871 case ICE_FLOW_FIELD_IDX_ETH_SA:
872 case ICE_FLOW_FIELD_IDX_S_VLAN:
873 case ICE_FLOW_FIELD_IDX_C_VLAN:
874 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
876 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
877 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
879 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
880 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
882 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
883 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
885 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
886 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
887 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
889 /* TTL and PROT share the same extraction seq. entry.
890 * Each is considered a sibling to the other in terms of sharing
891 * the same extraction sequence entry.
893 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
894 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
895 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
896 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
898 /* If the sibling field is also included, that field's
899 * mask needs to be included.
901 if (match & BIT(sib))
902 sib_mask = ice_flds_info[sib].mask;
904 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
905 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
906 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
908 /* TTL and PROT share the same extraction seq. entry.
909 * Each is considered a sibling to the other in terms of sharing
910 * the same extraction sequence entry.
912 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
913 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
914 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
915 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
917 /* If the sibling field is also included, that field's
918 * mask needs to be included.
920 if (match & BIT(sib))
921 sib_mask = ice_flds_info[sib].mask;
923 case ICE_FLOW_FIELD_IDX_IPV4_SA:
924 case ICE_FLOW_FIELD_IDX_IPV4_DA:
925 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
927 case ICE_FLOW_FIELD_IDX_IPV6_SA:
928 case ICE_FLOW_FIELD_IDX_IPV6_DA:
929 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
931 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
932 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
933 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
934 prot_id = ICE_PROT_TCP_IL;
936 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
937 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
938 prot_id = ICE_PROT_UDP_IL_OR_S;
940 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
941 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
942 prot_id = ICE_PROT_SCTP_IL;
944 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
945 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
946 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
947 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
948 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
949 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
950 /* GTP is accessed through UDP OF protocol */
951 prot_id = ICE_PROT_UDP_OF;
953 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
954 prot_id = ICE_PROT_PPPOE;
956 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
957 prot_id = ICE_PROT_UDP_IL_OR_S;
959 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
960 prot_id = ICE_PROT_L2TPV3;
962 case ICE_FLOW_FIELD_IDX_ESP_SPI:
963 prot_id = ICE_PROT_ESP_F;
965 case ICE_FLOW_FIELD_IDX_AH_SPI:
966 prot_id = ICE_PROT_ESP_2;
968 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
969 prot_id = ICE_PROT_UDP_IL_OR_S;
971 case ICE_FLOW_FIELD_IDX_ARP_SIP:
972 case ICE_FLOW_FIELD_IDX_ARP_DIP:
973 case ICE_FLOW_FIELD_IDX_ARP_SHA:
974 case ICE_FLOW_FIELD_IDX_ARP_DHA:
975 case ICE_FLOW_FIELD_IDX_ARP_OP:
976 prot_id = ICE_PROT_ARP_OF;
978 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
979 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
980 /* ICMP type and code share the same extraction seq. entry */
981 prot_id = (params->prof->segs[seg].hdrs &
982 ICE_FLOW_SEG_HDR_IPV4) ?
983 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
984 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
985 ICE_FLOW_FIELD_IDX_ICMP_CODE :
986 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
988 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
989 prot_id = ICE_PROT_GRE_OF;
/* Unrecognized field ID: no extraction possible */
992 return ICE_ERR_NOT_IMPL;
995 /* Each extraction sequence entry is a word in size, and extracts a
996 * word-aligned offset from a protocol header.
998 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record where and how this field is extracted: word-aligned byte
 * offset, bit displacement within that word, starting FV index, mask.
 */
1000 flds[fld].xtrct.prot_id = prot_id;
1001 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1002 ICE_FLOW_FV_EXTRACT_SZ;
1003 flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1004 flds[fld].xtrct.idx = params->es_cnt;
1005 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1007 /* Adjust the next field-entry index after accommodating the number of
1008 * entries this field consumes
1010 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1011 ice_flds_info[fld].size, ese_bits);
1013 /* Fill in the extraction sequence entries needed for this field */
1014 off = flds[fld].xtrct.off;
1015 mask = flds[fld].xtrct.mask;
1016 for (i = 0; i < cnt; i++) {
1017 /* Only consume an extraction sequence entry if there is no
1018 * sibling field associated with this field or the sibling entry
1019 * already extracts the word shared with this field.
1021 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1022 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1023 flds[sib].xtrct.off != off) {
1026 /* Make sure the number of extraction sequence required
1027 * does not exceed the block's capability
1029 if (params->es_cnt >= fv_words)
1030 return ICE_ERR_MAX_LIMIT;
1032 /* some blocks require a reversed field vector layout */
1033 if (hw->blk[params->blk].es.reverse)
1034 idx = fv_words - params->es_cnt - 1;
1036 idx = params->es_cnt;
/* Sibling mask is OR-ed in so one entry serves both fields */
1038 params->es[idx].prot_id = prot_id;
1039 params->es[idx].off = off;
1040 params->mask[idx] = mask | sib_mask;
1044 off += ICE_FLOW_FV_EXTRACT_SZ;
1051 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1052 * @hw: pointer to the HW struct
1053 * @params: information about the flow to be processed
1054 * @seg: index of packet segment whose raw fields are to be be extracted
1056 static enum ice_status
1057 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* No raw fields configured for this segment: nothing to extract */
1064 if (!params->prof->segs[seg].raws_cnt)
1067 if (params->prof->segs[seg].raws_cnt >
1068 ARRAY_SIZE(params->prof->segs[seg].raws))
1069 return ICE_ERR_MAX_LIMIT;
1071 /* Offsets within the segment headers are not supported */
1072 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1074 return ICE_ERR_PARAM;
1076 fv_words = hw->blk[params->blk].es.fvw;
1078 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1079 struct ice_flow_seg_fld_raw *raw;
/* NOTE(review): "¶ms" below looks like mojibake of "&params" --
 * confirm against the upstream source before building.
 */
1082 raw = ¶ms->prof->segs[seg].raws[i];
1084 /* Storing extraction information */
1085 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1086 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1087 ICE_FLOW_FV_EXTRACT_SZ;
1088 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1090 raw->info.xtrct.idx = params->es_cnt;
1092 /* Determine the number of field vector entries this raw field
1095 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1096 (raw->info.src.last * BITS_PER_BYTE),
1097 (ICE_FLOW_FV_EXTRACT_SZ *
1099 off = raw->info.xtrct.off;
1100 for (j = 0; j < cnt; j++) {
1103 /* Make sure the number of extraction sequence required
1104 * does not exceed the block's capability
1106 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1107 params->es_cnt >= ICE_MAX_FV_WORDS)
1108 return ICE_ERR_MAX_LIMIT;
1110 /* some blocks require a reversed field vector layout */
1111 if (hw->blk[params->blk].es.reverse)
1112 idx = fv_words - params->es_cnt - 1;
1114 idx = params->es_cnt;
1116 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1117 params->es[idx].off = off;
1119 off += ICE_FLOW_FV_EXTRACT_SZ;
1127 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1128 * @hw: pointer to the HW struct
1129 * @params: information about the flow to be processed
1131 * This function iterates through all matched fields in the given segments, and
1132 * creates an extraction sequence for the fields.
1134 static enum ice_status
1135 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1136 struct ice_flow_prof_params *params)
1138 enum ice_status status = ICE_SUCCESS;
1141 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1144 if (params->blk == ICE_BLK_ACL) {
1145 status = ice_flow_xtract_pkt_flags(hw, params,
1146 ICE_RX_MDID_PKT_FLAGS_15_0);
/* For each segment, walk the bitmap of matched fields and emit
 * extraction entries; loop ends early once all match bits are handled.
 */
1151 for (i = 0; i < params->prof->segs_cnt; i++) {
1152 u64 match = params->prof->segs[i].match;
1153 enum ice_flow_field j;
1155 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1156 const u64 bit = BIT_ULL(j);
1159 status = ice_flow_xtract_fld(hw, params, i, j,
1167 /* Process raw matching bytes */
1168 status = ice_flow_xtract_raws(hw, params, i);
1177 * ice_flow_sel_acl_scen - returns the specific scenario
1178 * @hw: pointer to the hardware structure
1179 * @params: information about the flow to be processed
1181 * This function will return the specific scenario based on the
1182 * params passed to it
1184 static enum ice_status
1185 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1187 /* Find the best-fit scenario for the provided match width */
1188 struct ice_acl_scen *cand_scen = NULL, *scen;
1191 return ICE_ERR_DOES_NOT_EXIST;
1193 /* Loop through each scenario and match against the scenario width
1194 * to select the specific scenario
1196 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
/* Keep the narrowest scenario that still fits the entry length */
1197 if (scen->eff_width >= params->entry_length &&
1198 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1201 return ICE_ERR_DOES_NOT_EXIST;
1203 params->prof->cfg.scen = cand_scen;
1209 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1210 * @params: information about the flow to be processed
1212 static enum ice_status
1213 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1215 u16 index, i, range_idx = 0;
1217 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1219 for (i = 0; i < params->prof->segs_cnt; i++) {
/* NOTE(review): "¶ms" below looks like mojibake of "&params" --
 * confirm against the upstream source before building.
 */
1220 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1221 u64 match = seg->match;
1224 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1225 struct ice_flow_fld_info *fld;
1226 const u64 bit = BIT_ULL(j);
1231 fld = &seg->fields[j];
1232 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1234 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1235 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1237 /* Range checking only supported for single
1240 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1242 BITS_PER_BYTE * 2) > 1)
1243 return ICE_ERR_PARAM;
1245 /* Ranges must define low and high values */
1246 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1247 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1248 return ICE_ERR_PARAM;
/* Range fields consume a range-checker slot, not bytes */
1250 fld->entry.val = range_idx++;
1252 /* Store adjusted byte-length of field for later
1253 * use, taking into account potential
1254 * non-byte-aligned displacement
1256 fld->entry.last = DIVIDE_AND_ROUND_UP
1257 (ice_flds_info[j].size +
1258 (fld->xtrct.disp % BITS_PER_BYTE),
1260 fld->entry.val = index;
1261 index += fld->entry.last;
/* Raw fields are laid out after the named fields */
1267 for (j = 0; j < seg->raws_cnt; j++) {
1268 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1270 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1271 raw->info.entry.val = index;
1272 raw->info.entry.last = raw->info.src.last;
1273 index += raw->info.entry.last;
1277 /* Currently only support using the byte selection base, which only
1278 * allows for an effective entry size of 30 bytes. Reject anything
1281 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1282 return ICE_ERR_PARAM;
1284 /* Only 8 range checkers per profile, reject anything trying to use
1287 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1288 return ICE_ERR_PARAM;
1290 /* Store # bytes required for entry for later use */
1291 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1297 * ice_flow_proc_segs - process all packet segments associated with a profile
1298 * @hw: pointer to the HW struct
1299 * @params: information about the flow to be processed
1301 static enum ice_status
1302 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1304 enum ice_status status;
1306 status = ice_flow_proc_seg_hdrs(params);
1310 status = ice_flow_create_xtrct_seq(hw, params);
/* Block-specific post-processing; ACL additionally needs an entry
 * format and a matching scenario.
 */
1314 switch (params->blk) {
1317 status = ICE_SUCCESS;
1320 status = ice_flow_acl_def_entry_frmt(params);
1323 status = ice_flow_sel_acl_scen(hw, params);
1329 return ICE_ERR_NOT_IMPL;
1335 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1336 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1337 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1340 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1341 * @hw: pointer to the HW struct
1342 * @blk: classification stage
1343 * @dir: flow direction
1344 * @segs: array of one or more packet segments that describe the flow
1345 * @segs_cnt: number of packet segments provided
1346 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1347 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1349 static struct ice_flow_prof *
1350 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1351 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1352 u8 segs_cnt, u16 vsi_handle, u32 conds)
1354 struct ice_flow_prof *p, *prof = NULL;
/* List walk is done under the per-block profile lock */
1356 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1357 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1358 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1359 segs_cnt && segs_cnt == p->segs_cnt) {
1362 /* Check for profile-VSI association if specified */
1363 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1364 ice_is_vsi_valid(hw, vsi_handle) &&
1365 !ice_is_bit_set(p->vsis, vsi_handle))
1368 /* Protocol headers must be checked. Matched fields are
1369 * checked if specified.
1371 for (i = 0; i < segs_cnt; i++)
1372 if (segs[i].hdrs != p->segs[i].hdrs ||
1373 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1374 segs[i].match != p->segs[i].match))
1377 /* A match is found if all segments are matched */
1378 if (i == segs_cnt) {
1384 ice_release_lock(&hw->fl_profs_locks[blk]);
1390 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1391 * @hw: pointer to the HW struct
1392 * @blk: classification stage
1393 * @dir: flow direction
1394 * @segs: array of one or more packet segments that describe the flow
1395 * @segs_cnt: number of packet segments provided
1398 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1399 struct ice_flow_seg_info *segs, u8 segs_cnt)
1401 struct ice_flow_prof *p;
/* Thin wrapper: delegates to the conditional lookup, checking fields
 * but no VSI association; returns the profile ID or the invalid marker.
 */
1403 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1404 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1406 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1410 * ice_flow_find_prof_id - Look up a profile with given profile ID
1411 * @hw: pointer to the HW struct
1412 * @blk: classification stage
1413 * @prof_id: unique ID to identify this flow profile
1415 static struct ice_flow_prof *
1416 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1418 struct ice_flow_prof *p;
/* Linear scan; caller is expected to hold the profile-list lock */
1420 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1421 if (p->id == prof_id)
1429 * ice_dealloc_flow_entry - Deallocate flow entry memory
1430 * @hw: pointer to the HW struct
1431 * @entry: flow entry to be removed
1434 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
/* Free all owned sub-allocations, then the entry itself */
1440 ice_free(hw, entry->entry);
1442 if (entry->range_buf) {
1443 ice_free(hw, entry->range_buf);
1444 entry->range_buf = NULL;
1448 ice_free(hw, entry->acts);
1450 entry->acts_cnt = 0;
1453 ice_free(hw, entry);
1456 #define ICE_ACL_INVALID_SCEN 0x3f
1459 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1460 * @hw: pointer to the hardware structure
1461 * @prof: pointer to flow profile
1462 * @buf: destination buffer function writes partial xtrct sequence to
1464 * returns ICE_SUCCESS if no pf is associated to the given profile
1465 * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1466 * returns other error code for real error
1468 static enum ice_status
1469 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1470 struct ice_aqc_acl_prof_generic_frmt *buf)
1472 enum ice_status status;
1475 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1479 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1483 /* If all pf's associated scenarios are all 0 or all
1484 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1485 * not been configured yet.
1487 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1488 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1489 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1490 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1493 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1494 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1495 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1496 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1497 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1498 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1499 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1500 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* At least one PF slot holds a real scenario: profile is in use */
1503 return ICE_ERR_IN_USE;
1507 * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1508 * @hw: pointer to the hardware structure
1509 * @acts: array of actions to be performed on a match
1510 * @acts_cnt: number of actions
1512 static enum ice_status
1513 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1518 for (i = 0; i < acts_cnt; i++) {
/* Only counter-type actions hold a HW counter to release */
1519 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1520 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1521 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1522 struct ice_acl_cntrs cntrs;
1523 enum ice_status status;
1525 cntrs.bank = 0; /* Only bank0 for the moment */
1527 LE16_TO_CPU(acts[i].data.acl_act.value);
1529 LE16_TO_CPU(acts[i].data.acl_act.value);
1531 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1532 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1534 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1536 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1545 * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
1546 * @hw: pointer to the hardware structure
1547 * @prof: pointer to flow profile
1549 * Disassociate the scenario to the Profile for the PF of the VSI.
1551 static enum ice_status
1552 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1554 struct ice_aqc_acl_prof_generic_frmt buf;
1555 enum ice_status status = ICE_SUCCESS;
1558 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1560 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* Read-modify-write of the profile format: fetch, clear this PF's
 * scenario slot, program back.
 */
1564 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1568 /* Clear scenario for this pf */
1569 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1570 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1576 * ice_flow_rem_entry_sync - Remove a flow entry
1577 * @hw: pointer to the HW struct
1578 * @blk: classification stage
1579 * @entry: flow entry to be removed
1581 static enum ice_status
1582 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1583 struct ice_flow_entry *entry)
1586 return ICE_ERR_BAD_PTR;
1588 if (blk == ICE_BLK_ACL) {
1589 enum ice_status status;
1592 return ICE_ERR_BAD_PTR;
/* ACL entries must also be removed from the HW scenario */
1594 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1595 entry->scen_entry_idx);
1599 /* Checks if we need to release an ACL counter. */
1600 if (entry->acts_cnt && entry->acts)
1601 ice_flow_acl_free_act_cntr(hw, entry->acts,
/* Unlink from the profile's entry list, then free all memory */
1605 LIST_DEL(&entry->l_entry);
1607 ice_dealloc_flow_entry(hw, entry);
1613 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1614 * @hw: pointer to the HW struct
1615 * @blk: classification stage
1616 * @dir: flow direction
1617 * @prof_id: unique ID to identify this flow profile
1618 * @segs: array of one or more packet segments that describe the flow
1619 * @segs_cnt: number of packet segments provided
1620 * @acts: array of default actions
1621 * @acts_cnt: number of default actions
1622 * @prof: stores the returned flow profile added
1624 * Assumption: the caller has acquired the lock to the profile list
1626 static enum ice_status
1627 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1628 enum ice_flow_dir dir, u64 prof_id,
1629 struct ice_flow_seg_info *segs, u8 segs_cnt,
1630 struct ice_flow_action *acts, u8 acts_cnt,
1631 struct ice_flow_prof **prof)
1633 struct ice_flow_prof_params params;
1634 enum ice_status status;
1637 if (!prof || (acts_cnt && !acts))
1638 return ICE_ERR_BAD_PTR;
/* NOTE(review): "¶ms" in this function looks like mojibake of
 * "&params" -- confirm against the upstream source before building.
 */
1640 ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
1641 params.prof = (struct ice_flow_prof *)
1642 ice_malloc(hw, sizeof(*params.prof));
1644 return ICE_ERR_NO_MEMORY;
1646 /* initialize extraction sequence to all invalid (0xff) */
1647 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1648 params.es[i].prot_id = ICE_PROT_INVALID;
1649 params.es[i].off = ICE_FV_OFFSET_INVAL;
1653 params.prof->id = prof_id;
1654 params.prof->dir = dir;
1655 params.prof->segs_cnt = segs_cnt;
1657 /* Make a copy of the segments that need to be persistent in the flow
1660 for (i = 0; i < segs_cnt; i++)
1661 ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs),
1662 ICE_NONDMA_TO_NONDMA);
1664 /* Make a copy of the actions that need to be persistent in the flow
1668 params.prof->acts = (struct ice_flow_action *)
1669 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1670 ICE_NONDMA_TO_NONDMA);
1672 if (!params.prof->acts) {
1673 status = ICE_ERR_NO_MEMORY;
1678 status = ice_flow_proc_segs(hw, ¶ms);
1680 ice_debug(hw, ICE_DBG_FLOW,
1681 "Error processing a flow's packet segments\n");
1685 /* Add a HW profile for this flow profile */
1686 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1687 params.attr, params.attr_cnt, params.es,
1690 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1694 INIT_LIST_HEAD(¶ms.prof->entries);
1695 ice_init_lock(¶ms.prof->entries_lock);
1696 *prof = params.prof;
/* Error path: release everything allocated in this function */
1700 if (params.prof->acts)
1701 ice_free(hw, params.prof->acts);
1702 ice_free(hw, params.prof);
1709 * ice_flow_rem_prof_sync - remove a flow profile
1710 * @hw: pointer to the hardware structure
1711 * @blk: classification stage
1712 * @prof: pointer to flow profile to remove
1714 * Assumption: the caller has acquired the lock to the profile list
1716 static enum ice_status
1717 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1718 struct ice_flow_prof *prof)
1720 enum ice_status status;
1722 /* Remove all remaining flow entries before removing the flow profile */
1723 if (!LIST_EMPTY(&prof->entries)) {
1724 struct ice_flow_entry *e, *t;
1726 ice_acquire_lock(&prof->entries_lock);
1728 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1730 status = ice_flow_rem_entry_sync(hw, blk, e);
1735 ice_release_lock(&prof->entries_lock);
1738 if (blk == ICE_BLK_ACL) {
1739 struct ice_aqc_acl_profile_ranges query_rng_buf;
1740 struct ice_aqc_acl_prof_generic_frmt buf;
1743 /* Deassociate the scenario to the Profile for the PF */
1744 status = ice_flow_acl_disassoc_scen(hw, prof);
1748 /* Clear the range-checker if the profile ID is no longer
1751 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1752 if (status && status != ICE_ERR_IN_USE) {
1754 } else if (!status) {
1755 /* Clear the range-checker value for profile ID */
1756 ice_memset(&query_rng_buf, 0,
1757 sizeof(struct ice_aqc_acl_profile_ranges),
1760 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1765 status = ice_prog_acl_prof_ranges(hw, prof_id,
1766 &query_rng_buf, NULL);
1772 /* Remove all hardware profiles associated with this flow profile */
1773 status = ice_rem_prof(hw, blk, prof->id);
/* Unlink and free the software profile on success */
1775 LIST_DEL(&prof->l_entry);
1776 ice_destroy_lock(&prof->entries_lock);
1778 ice_free(hw, prof->acts);
1786 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1787 * @buf: Destination buffer function writes partial xtrct sequence to
1788 * @info: Info about field
1791 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1792 struct ice_flow_fld_info *info)
/* Source byte index within the field vector: word index scaled to
 * bytes plus the byte part of the bit displacement.
 */
1797 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1798 info->xtrct.disp / BITS_PER_BYTE;
1799 dst = info->entry.val;
1800 for (i = 0; i < info->entry.last; i++)
1801 /* HW stores field vector words in LE, convert words back to BE
1802 * so constructed entries will end up in network order
1804 buf->byte_selection[dst++] = src++ ^ 1;
1808 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1809 * @hw: pointer to the hardware structure
1810 * @prof: pointer to flow profile
1812 static enum ice_status
1813 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1815 struct ice_aqc_acl_prof_generic_frmt buf;
1816 struct ice_flow_fld_info *info;
1817 enum ice_status status;
1821 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1823 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* ICE_ERR_IN_USE means another PF already programmed the profile */
1827 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1828 if (status && status != ICE_ERR_IN_USE)
1832 /* Program the profile dependent configuration. This is done
1833 * only once regardless of the number of PFs using that profile
1835 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1837 for (i = 0; i < prof->segs_cnt; i++) {
1838 struct ice_flow_seg_info *seg = &prof->segs[i];
1839 u64 match = seg->match;
1842 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1843 const u64 bit = BIT_ULL(j);
1848 info = &seg->fields[j];
1850 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1851 buf.word_selection[info->entry.val] =
1854 ice_flow_acl_set_xtrct_seq_fld(&buf,
/* Raw fields use the same per-field byte-selection helper */
1860 for (j = 0; j < seg->raws_cnt; j++) {
1861 info = &seg->raws[j].info;
1862 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
1866 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1867 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1871 /* Update the current PF */
1872 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1873 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1879 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1880 * @hw: pointer to the hardware structure
1881 * @blk: classification stage
1882 * @vsi_handle: software VSI handle
1883 * @vsig: target VSI group
1885 * Assumption: the caller has already verified that the VSI to
1886 * be added has the same characteristics as the VSIG and will
1887 * thereby have access to all resources added to that VSIG.
1890 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1893 enum ice_status status;
1895 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1896 return ICE_ERR_PARAM;
/* Translate the software handle to the HW VSI number under lock */
1898 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1899 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1901 ice_release_lock(&hw->fl_profs_locks[blk]);
1907 * ice_flow_assoc_prof - associate a VSI with a flow profile
1908 * @hw: pointer to the hardware structure
1909 * @blk: classification stage
1910 * @prof: pointer to flow profile
1911 * @vsi_handle: software VSI handle
1913 * Assumption: the caller has acquired the lock to the profile list
1914 * and the software VSI handle has been validated
1916 static enum ice_status
1917 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1918 struct ice_flow_prof *prof, u16 vsi_handle)
1920 enum ice_status status = ICE_SUCCESS;
/* No-op if the VSI is already associated (bit already set) */
1922 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1923 if (blk == ICE_BLK_ACL) {
1924 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1928 status = ice_add_prof_id_flow(hw, blk,
1929 ice_get_hw_vsi_num(hw,
1933 ice_set_bit(vsi_handle, prof->vsis);
1935 ice_debug(hw, ICE_DBG_FLOW,
1936 "HW profile add failed, %d\n",
1944 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1945 * @hw: pointer to the hardware structure
1946 * @blk: classification stage
1947 * @prof: pointer to flow profile
1948 * @vsi_handle: software VSI handle
1950 * Assumption: the caller has acquired the lock to the profile list
1951 * and the software VSI handle has been validated
1953 static enum ice_status
1954 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1955 struct ice_flow_prof *prof, u16 vsi_handle)
1957 enum ice_status status = ICE_SUCCESS;
/* Mirror of ice_flow_assoc_prof: only act if the bit is set */
1959 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1960 status = ice_rem_prof_id_flow(hw, blk,
1961 ice_get_hw_vsi_num(hw,
1965 ice_clear_bit(vsi_handle, prof->vsis);
1967 ice_debug(hw, ICE_DBG_FLOW,
1968 "HW profile remove failed, %d\n",
1976 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1977 * @hw: pointer to the HW struct
1978 * @blk: classification stage
1979 * @dir: flow direction
1980 * @prof_id: unique ID to identify this flow profile
1981 * @segs: array of one or more packet segments that describe the flow
1982 * @segs_cnt: number of packet segments provided
1983 * @acts: array of default actions
1984 * @acts_cnt: number of default actions
1985 * @prof: stores the returned flow profile added
1988 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1989 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1990 struct ice_flow_action *acts, u8 acts_cnt,
1991 struct ice_flow_prof **prof)
1993 enum ice_status status;
1995 if (segs_cnt > ICE_FLOW_SEG_MAX)
1996 return ICE_ERR_MAX_LIMIT;
1999 return ICE_ERR_PARAM;
2002 return ICE_ERR_BAD_PTR;
2004 status = ice_flow_val_hdrs(segs, segs_cnt);
/* Locked wrapper around ice_flow_add_prof_sync; links the new
 * profile into the per-block list on success.
 */
2008 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2010 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2011 acts, acts_cnt, prof);
2013 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2015 ice_release_lock(&hw->fl_profs_locks[blk]);
2021 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2022 * @hw: pointer to the HW struct
2023 * @blk: the block for which the flow profile is to be removed
2024 * @prof_id: unique ID of the flow profile to be removed
2027 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2029 struct ice_flow_prof *prof;
2030 enum ice_status status;
2032 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2034 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2036 status = ICE_ERR_DOES_NOT_EXIST;
2040 /* prof becomes invalid after the call */
2041 status = ice_flow_rem_prof_sync(hw, blk, prof);
2044 ice_release_lock(&hw->fl_profs_locks[blk]);
2050 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2051 * @hw: pointer to the HW struct
2052 * @blk: classification stage
2053 * @prof_id: the profile ID handle
2054 * @hw_prof_id: pointer to variable to receive the HW profile ID
2057 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2060 struct ice_prof_map *map;
2062 map = ice_search_prof_id(hw, blk, prof_id);
2064 *hw_prof_id = map->prof_id;
2068 return ICE_ERR_DOES_NOT_EXIST;
2072 * ice_flow_find_entry - look for a flow entry using its unique ID
2073 * @hw: pointer to the HW struct
2074 * @blk: classification stage
2075 * @entry_id: unique ID to identify this flow entry
2077 * This function looks for the flow entry with the specified unique ID in all
2078 * flow profiles of the specified classification stage. If the entry is found,
2079 * and it returns the handle to the flow entry. Otherwise, it returns
2080 * ICE_FLOW_ENTRY_ID_INVAL.
2082 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2084 struct ice_flow_entry *found = NULL;
2085 struct ice_flow_prof *p;
2087 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2089 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2090 struct ice_flow_entry *e;
2092 ice_acquire_lock(&p->entries_lock);
2093 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2094 if (e->id == entry_id) {
2098 ice_release_lock(&p->entries_lock);
2104 ice_release_lock(&hw->fl_profs_locks[blk]);
2106 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2110 * ice_flow_acl_check_actions - Checks the acl rule's actions
2111 * @hw: pointer to the hardware structure
2112 * @acts: array of actions to be performed on a match
2113 * @acts_cnt: number of actions
2114 * @cnt_alloc: indicates if a ACL counter has been allocated.
2116 static enum ice_status
2117 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2118 u8 acts_cnt, bool *cnt_alloc)
2120 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2123 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2126 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2127 return ICE_ERR_OUT_OF_RANGE;
2129 for (i = 0; i < acts_cnt; i++) {
2130 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2131 acts[i].type != ICE_FLOW_ACT_DROP &&
2132 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2133 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2136 /* If the caller want to add two actions of the same type, then
2137 * it is considered invalid configuration.
2139 if (ice_test_and_set_bit(acts[i].type, dup_check))
2140 return ICE_ERR_PARAM;
2143 /* Checks if ACL counters are needed. */
2144 for (i = 0; i < acts_cnt; i++) {
2145 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2146 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2147 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2148 struct ice_acl_cntrs cntrs;
2149 enum ice_status status;
2152 cntrs.bank = 0; /* Only bank0 for the moment */
2154 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2155 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2157 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2159 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2162 /* Counter index within the bank */
2163 acts[i].data.acl_act.value =
2164 CPU_TO_LE16(cntrs.first_cntr);
2173 * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
2174 * @fld: number of the given field
2175 * @info: info about field
2176 * @range_buf: range checker configuration buffer
2177 * @data: pointer to a data buffer containing flow entry's match values/masks
2178 * @range: Input/output param indicating which range checkers are being used
2181 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2182 struct ice_aqc_acl_profile_ranges *range_buf,
2183 u8 *data, u8 *range)
2187 /* If not specified, default mask is all bits in field */
2188 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2189 BIT(ice_flds_info[fld].size) - 1 :
2190 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2192 /* If the mask is 0, then we don't need to worry about this input
2193 * range checker value.
2197 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2199 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2200 u8 range_idx = info->entry.val;
2202 range_buf->checker_cfg[range_idx].low_boundary =
2203 CPU_TO_BE16(new_low);
2204 range_buf->checker_cfg[range_idx].high_boundary =
2205 CPU_TO_BE16(new_high);
2206 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2208 /* Indicate which range checker is being used */
2209 *range |= BIT(range_idx);
2214 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
2215 * @fld: number of the given field
2216 * @info: info about the field
2217 * @buf: buffer containing the entry
2218 * @dontcare: buffer containing don't care mask for entry
2219 * @data: pointer to a data buffer containing flow entry's match values/masks
2222 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2223 u8 *dontcare, u8 *data)
2225 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2226 bool use_mask = false;
2229 src = info->src.val;
2230 mask = info->src.mask;
2231 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2232 disp = info->xtrct.disp % BITS_PER_BYTE;
2234 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2237 for (k = 0; k < info->entry.last; k++, dst++) {
2238 /* Add overflow bits from previous byte */
2239 buf[dst] = (tmp_s & 0xff00) >> 8;
2241 /* If mask is not valid, tmp_m is always zero, so just setting
2242 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2243 * overflow bits of mask from prev byte
2245 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2247 /* If there is displacement, last byte will only contain
2248 * displaced data, but there is no more data to read from user
2249 * buffer, so skip so as not to potentially read beyond end of
2252 if (!disp || k < info->entry.last - 1) {
2253 /* Store shifted data to use in next byte */
2254 tmp_s = data[src++] << disp;
2256 /* Add current (shifted) byte */
2257 buf[dst] |= tmp_s & 0xff;
2259 /* Handle mask if valid */
2261 tmp_m = (~data[mask++] & 0xff) << disp;
2262 dontcare[dst] |= tmp_m & 0xff;
2267 /* Fill in don't care bits at beginning of field */
2269 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2270 for (k = 0; k < disp; k++)
2271 dontcare[dst] |= BIT(k);
2274 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2276 /* Fill in don't care bits at end of field */
2278 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2279 info->entry.last - 1;
2280 for (k = end_disp; k < BITS_PER_BYTE; k++)
2281 dontcare[dst] |= BIT(k);
2286 * ice_flow_acl_frmt_entry - Format acl entry
2287 * @hw: pointer to the hardware structure
2288 * @prof: pointer to flow profile
2289 * @e: pointer to the flow entry
2290 * @data: pointer to a data buffer containing flow entry's match values/masks
2291 * @acts: array of actions to be performed on a match
2292 * @acts_cnt: number of actions
2294 * Formats the key (and key_inverse) to be matched from the data passed in,
2295 * along with data from the flow profile. This key/key_inverse pair makes up
2296 * the 'entry' for an acl flow entry.
2298 static enum ice_status
2299 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2300 struct ice_flow_entry *e, u8 *data,
2301 struct ice_flow_action *acts, u8 acts_cnt)
2303 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2304 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2305 enum ice_status status;
2310 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2314 /* Format the result action */
2316 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2320 status = ICE_ERR_NO_MEMORY;
2322 e->acts = (struct ice_flow_action *)
2323 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2324 ICE_NONDMA_TO_NONDMA);
2329 e->acts_cnt = acts_cnt;
2331 /* Format the matching data */
2332 buf_sz = prof->cfg.scen->width;
2333 buf = (u8 *)ice_malloc(hw, buf_sz);
2337 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2341 /* 'key' buffer will store both key and key_inverse, so must be twice
2344 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2348 range_buf = (struct ice_aqc_acl_profile_ranges *)
2349 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2353 /* Set don't care mask to all 1's to start, will zero out used bytes */
2354 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2356 for (i = 0; i < prof->segs_cnt; i++) {
2357 struct ice_flow_seg_info *seg = &prof->segs[i];
2358 u64 match = seg->match;
2361 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2362 struct ice_flow_fld_info *info;
2363 const u64 bit = BIT_ULL(j);
2368 info = &seg->fields[j];
2370 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2371 ice_flow_acl_frmt_entry_range(j, info,
2375 ice_flow_acl_frmt_entry_fld(j, info, buf,
2381 for (j = 0; j < seg->raws_cnt; j++) {
2382 struct ice_flow_fld_info *info = &seg->raws[j].info;
2383 u16 dst, src, mask, k;
2384 bool use_mask = false;
2386 src = info->src.val;
2387 dst = info->entry.val -
2388 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2389 mask = info->src.mask;
2391 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2394 for (k = 0; k < info->entry.last; k++, dst++) {
2395 buf[dst] = data[src++];
2397 dontcare[dst] = ~data[mask++];
2404 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2405 dontcare[prof->cfg.scen->pid_idx] = 0;
2407 /* Format the buffer for direction flags */
2408 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2410 if (prof->dir == ICE_FLOW_RX)
2411 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2414 buf[prof->cfg.scen->rng_chk_idx] = range;
2415 /* Mark any unused range checkers as don't care */
2416 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2417 e->range_buf = range_buf;
2419 ice_free(hw, range_buf);
2422 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2428 e->entry_sz = buf_sz * 2;
2435 ice_free(hw, dontcare);
2440 if (status && range_buf) {
2441 ice_free(hw, range_buf);
2442 e->range_buf = NULL;
2445 if (status && e->acts) {
2446 ice_free(hw, e->acts);
2451 if (status && cnt_alloc)
2452 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2458 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2459 * the compared data.
2460 * @prof: pointer to flow profile
2461 * @e: pointer to the comparing flow entry
2462 * @do_chg_action: decide if we want to change the ACL action
2463 * @do_add_entry: decide if we want to add the new ACL entry
2464 * @do_rem_entry: decide if we want to remove the current ACL entry
2466 * Find an ACL scenario entry that matches the compared data. In the same time,
2467 * this function also figure out:
2468 * a/ If we want to change the ACL action
2469 * b/ If we want to add the new ACL entry
2470 * c/ If we want to remove the current ACL entry
2472 static struct ice_flow_entry *
2473 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2474 struct ice_flow_entry *e, bool *do_chg_action,
2475 bool *do_add_entry, bool *do_rem_entry)
2477 struct ice_flow_entry *p, *return_entry = NULL;
2481 * a/ There exists an entry with same matching data, but different
2482 * priority, then we remove this existing ACL entry. Then, we
2483 * will add the new entry to the ACL scenario.
2484 * b/ There exists an entry with same matching data, priority, and
2485 * result action, then we do nothing
2486 * c/ There exists an entry with same matching data, priority, but
2487 * different, action, then do only change the action's entry.
2488 * d/ Else, we add this new entry to the ACL scenario.
2490 *do_chg_action = false;
2491 *do_add_entry = true;
2492 *do_rem_entry = false;
2493 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2494 if (memcmp(p->entry, e->entry, p->entry_sz))
2497 /* From this point, we have the same matching_data. */
2498 *do_add_entry = false;
2501 if (p->priority != e->priority) {
2502 /* matching data && !priority */
2503 *do_add_entry = true;
2504 *do_rem_entry = true;
2508 /* From this point, we will have matching_data && priority */
2509 if (p->acts_cnt != e->acts_cnt)
2510 *do_chg_action = true;
2511 for (i = 0; i < p->acts_cnt; i++) {
2512 bool found_not_match = false;
2514 for (j = 0; j < e->acts_cnt; j++)
2515 if (memcmp(&p->acts[i], &e->acts[j],
2516 sizeof(struct ice_flow_action))) {
2517 found_not_match = true;
2521 if (found_not_match) {
2522 *do_chg_action = true;
2527 /* (do_chg_action = true) means :
2528 * matching_data && priority && !result_action
2529 * (do_chg_action = false) means :
2530 * matching_data && priority && result_action
2535 return return_entry;
2539 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2542 static enum ice_acl_entry_prior
2543 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2545 enum ice_acl_entry_prior acl_prior;
2548 case ICE_FLOW_PRIO_LOW:
2549 acl_prior = ICE_LOW;
2551 case ICE_FLOW_PRIO_NORMAL:
2552 acl_prior = ICE_NORMAL;
2554 case ICE_FLOW_PRIO_HIGH:
2555 acl_prior = ICE_HIGH;
2558 acl_prior = ICE_NORMAL;
2566 * ice_flow_acl_union_rng_chk - Perform union operation between two
2567 * range-range checker buffers
2568 * @dst_buf: pointer to destination range checker buffer
2569 * @src_buf: pointer to source range checker buffer
2571 * For this function, we do the union between dst_buf and src_buf
2572 * range checker buffer, and we will save the result back to dst_buf
2574 static enum ice_status
2575 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2576 struct ice_aqc_acl_profile_ranges *src_buf)
2580 if (!dst_buf || !src_buf)
2581 return ICE_ERR_BAD_PTR;
2583 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2584 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2585 bool will_populate = false;
2587 in_data = &src_buf->checker_cfg[i];
2592 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2593 cfg_data = &dst_buf->checker_cfg[j];
2595 if (!cfg_data->mask ||
2596 !memcmp(cfg_data, in_data,
2597 sizeof(struct ice_acl_rng_data))) {
2598 will_populate = true;
2603 if (will_populate) {
2604 ice_memcpy(cfg_data, in_data,
2605 sizeof(struct ice_acl_rng_data),
2606 ICE_NONDMA_TO_NONDMA);
2608 /* No available slot left to program range checker */
2609 return ICE_ERR_MAX_LIMIT;
2617 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2618 * @hw: pointer to the hardware structure
2619 * @prof: pointer to flow profile
2620 * @entry: double pointer to the flow entry
2622 * For this function, we will look at the current added entries in the
2623 * corresponding ACL scenario. Then, we will perform matching logic to
2624 * see if we want to add/modify/do nothing with this new entry.
2626 static enum ice_status
2627 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2628 struct ice_flow_entry **entry)
2630 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2631 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2632 struct ice_acl_act_entry *acts = NULL;
2633 struct ice_flow_entry *exist;
2634 enum ice_status status = ICE_SUCCESS;
2635 struct ice_flow_entry *e;
2638 if (!entry || !(*entry) || !prof)
2639 return ICE_ERR_BAD_PTR;
2643 do_chg_rng_chk = false;
2647 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2652 /* Query the current range-checker value in FW */
2653 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2657 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2658 sizeof(struct ice_aqc_acl_profile_ranges),
2659 ICE_NONDMA_TO_NONDMA);
2661 /* Generate the new range-checker value */
2662 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2666 /* Reconfigure the range check if the buffer is changed. */
2667 do_chg_rng_chk = false;
2668 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2669 sizeof(struct ice_aqc_acl_profile_ranges))) {
2670 status = ice_prog_acl_prof_ranges(hw, prof_id,
2671 &cfg_rng_buf, NULL);
2675 do_chg_rng_chk = true;
2679 /* Figure out if we want to (change the ACL action) and/or
2680 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2682 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2683 &do_add_entry, &do_rem_entry);
2686 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2691 /* Prepare the result action buffer */
2692 acts = (struct ice_acl_act_entry *)ice_calloc
2693 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2694 for (i = 0; i < e->acts_cnt; i++)
2695 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2696 sizeof(struct ice_acl_act_entry),
2697 ICE_NONDMA_TO_NONDMA);
2700 enum ice_acl_entry_prior prior;
2704 keys = (u8 *)e->entry;
2705 inverts = keys + (e->entry_sz / 2);
2706 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2708 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2709 inverts, acts, e->acts_cnt,
2714 e->scen_entry_idx = entry_idx;
2715 LIST_ADD(&e->l_entry, &prof->entries);
2717 if (do_chg_action) {
2718 /* For the action memory info, update the SW's copy of
2719 * exist entry with e's action memory info
2721 ice_free(hw, exist->acts);
2722 exist->acts_cnt = e->acts_cnt;
2723 exist->acts = (struct ice_flow_action *)
2724 ice_calloc(hw, exist->acts_cnt,
2725 sizeof(struct ice_flow_action));
2728 status = ICE_ERR_NO_MEMORY;
2732 ice_memcpy(exist->acts, e->acts,
2733 sizeof(struct ice_flow_action) * e->acts_cnt,
2734 ICE_NONDMA_TO_NONDMA);
2736 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2738 exist->scen_entry_idx);
2743 if (do_chg_rng_chk) {
2744 /* In this case, we want to update the range checker
2745 * information of the exist entry
2747 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2753 /* As we don't add the new entry to our SW DB, deallocate its
2754 * memories, and return the exist entry to the caller
2756 ice_dealloc_flow_entry(hw, e);
2767 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2768 * @hw: pointer to the hardware structure
2769 * @prof: pointer to flow profile
2770 * @e: double pointer to the flow entry
2772 static enum ice_status
2773 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2774 struct ice_flow_entry **e)
2776 enum ice_status status;
2778 ice_acquire_lock(&prof->entries_lock);
2779 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2780 ice_release_lock(&prof->entries_lock);
2786 * ice_flow_add_entry - Add a flow entry
2787 * @hw: pointer to the HW struct
2788 * @blk: classification stage
2789 * @prof_id: ID of the profile to add a new flow entry to
2790 * @entry_id: unique ID to identify this flow entry
2791 * @vsi_handle: software VSI handle for the flow entry
2792 * @prio: priority of the flow entry
2793 * @data: pointer to a data buffer containing flow entry's match values/masks
2794 * @acts: arrays of actions to be performed on a match
2795 * @acts_cnt: number of actions
2796 * @entry_h: pointer to buffer that receives the new flow entry's handle
2799 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2800 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2801 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2804 struct ice_flow_entry *e = NULL;
2805 struct ice_flow_prof *prof;
2806 enum ice_status status = ICE_SUCCESS;
2808 /* ACL entries must indicate an action */
2809 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2810 return ICE_ERR_PARAM;
2812 /* No flow entry data is expected for RSS */
2813 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2814 return ICE_ERR_BAD_PTR;
2816 if (!ice_is_vsi_valid(hw, vsi_handle))
2817 return ICE_ERR_PARAM;
2819 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2821 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2823 status = ICE_ERR_DOES_NOT_EXIST;
2825 /* Allocate memory for the entry being added and associate
2826 * the VSI to the found flow profile
2828 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2830 status = ICE_ERR_NO_MEMORY;
2832 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2835 ice_release_lock(&hw->fl_profs_locks[blk]);
2840 e->vsi_handle = vsi_handle;
2849 /* ACL will handle the entry management */
2850 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2855 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2863 status = ICE_ERR_NOT_IMPL;
2867 if (blk != ICE_BLK_ACL) {
2868 /* ACL will handle the entry management */
2869 ice_acquire_lock(&prof->entries_lock);
2870 LIST_ADD(&e->l_entry, &prof->entries);
2871 ice_release_lock(&prof->entries_lock);
2874 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2879 ice_free(hw, e->entry);
2887 * ice_flow_rem_entry - Remove a flow entry
2888 * @hw: pointer to the HW struct
2889 * @blk: classification stage
2890 * @entry_h: handle to the flow entry to be removed
2892 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2895 struct ice_flow_entry *entry;
2896 struct ice_flow_prof *prof;
2897 enum ice_status status = ICE_SUCCESS;
2899 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2900 return ICE_ERR_PARAM;
2902 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2904 /* Retain the pointer to the flow profile as the entry will be freed */
2908 ice_acquire_lock(&prof->entries_lock);
2909 status = ice_flow_rem_entry_sync(hw, blk, entry);
2910 ice_release_lock(&prof->entries_lock);
2917 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2918 * @seg: packet segment the field being set belongs to
2919 * @fld: field to be set
2920 * @field_type: type of the field
2921 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2922 * entry's input buffer
2923 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2925 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2926 * entry's input buffer
2928 * This helper function stores information of a field being matched, including
2929 * the type of the field and the locations of the value to match, the mask, and
2930 * and the upper-bound value in the start of the input buffer for a flow entry.
2931 * This function should only be used for fixed-size data structures.
2933 * This function also opportunistically determines the protocol headers to be
2934 * present based on the fields being set. Some fields cannot be used alone to
2935 * determine the protocol headers present. Sometimes, fields for particular
2936 * protocol headers are not matched. In those cases, the protocol headers
2937 * must be explicitly set.
2940 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2941 enum ice_flow_fld_match_type field_type, u16 val_loc,
2942 u16 mask_loc, u16 last_loc)
2944 u64 bit = BIT_ULL(fld);
2947 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2950 seg->fields[fld].type = field_type;
2951 seg->fields[fld].src.val = val_loc;
2952 seg->fields[fld].src.mask = mask_loc;
2953 seg->fields[fld].src.last = last_loc;
2955 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2959 * ice_flow_set_fld - specifies locations of field from entry's input buffer
2960 * @seg: packet segment the field being set belongs to
2961 * @fld: field to be set
2962 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2963 * entry's input buffer
2964 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2966 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2967 * entry's input buffer
2968 * @range: indicate if field being matched is to be in a range
2970 * This function specifies the locations, in the form of byte offsets from the
2971 * start of the input buffer for a flow entry, from where the value to match,
2972 * the mask value, and upper value can be extracted. These locations are then
2973 * stored in the flow profile. When adding a flow entry associated with the
2974 * flow profile, these locations will be used to quickly extract the values and
2975 * create the content of a match entry. This function should only be used for
2976 * fixed-size data structures.
2979 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2980 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
2982 enum ice_flow_fld_match_type t = range ?
2983 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
2985 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
2989 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
2990 * @seg: packet segment the field being set belongs to
2991 * @fld: field to be set
2992 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2993 * entry's input buffer
2994 * @pref_loc: location of prefix value from entry's input buffer
2995 * @pref_sz: size of the location holding the prefix value
2997 * This function specifies the locations, in the form of byte offsets from the
2998 * start of the input buffer for a flow entry, from where the value to match
2999 * and the IPv4 prefix value can be extracted. These locations are then stored
3000 * in the flow profile. When adding flow entries to the associated flow profile,
3001 * these locations can be used to quickly extract the values to create the
3002 * content of a match entry. This function should only be used for fixed-size
3006 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3007 u16 val_loc, u16 pref_loc, u8 pref_sz)
3009 /* For this type of field, the "mask" location is for the prefix value's
3010 * location and the "last" location is for the size of the location of
3013 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3014 pref_loc, (u16)pref_sz);
3018 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3019 * @seg: packet segment the field being set belongs to
3020 * @off: offset of the raw field from the beginning of the segment in bytes
3021 * @len: length of the raw pattern to be matched
3022 * @val_loc: location of the value to match from entry's input buffer
3023 * @mask_loc: location of mask value from entry's input buffer
3025 * This function specifies the offset of the raw field to be match from the
3026 * beginning of the specified packet segment, and the locations, in the form of
3027 * byte offsets from the start of the input buffer for a flow entry, from where
3028 * the value to match and the mask value to be extracted. These locations are
3029 * then stored in the flow profile. When adding flow entries to the associated
3030 * flow profile, these locations can be used to quickly extract the values to
3031 * create the content of a match entry. This function should only be used for
3032 * fixed-size data structures.
3035 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3036 u16 val_loc, u16 mask_loc)
3038 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3039 seg->raws[seg->raws_cnt].off = off;
3040 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3041 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3042 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3043 /* The "last" field is used to store the length of the field */
3044 seg->raws[seg->raws_cnt].info.src.last = len;
3047 /* Overflows of "raws" will be handled as an error condition later in
3048 * the flow when this information is processed.
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

/* All protocol-header bits an RSS segment is allowed to carry */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3069 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3070 * @segs: pointer to the flow field segment(s)
3071 * @hash_fields: fields to be hashed on for the segment(s)
3072 * @flow_hdr: protocol header fields within a packet segment
3074 * Helper function to extract fields from hash bitmap and use flow
3075 * header value to set flow field segment for further use in flow
3076 * profile entry or removal.
3078 static enum ice_status
3079 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3082 u64 val = hash_fields;
3085 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3086 u64 bit = BIT_ULL(i);
3089 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3090 ICE_FLOW_FLD_OFF_INVAL,
3091 ICE_FLOW_FLD_OFF_INVAL,
3092 ICE_FLOW_FLD_OFF_INVAL, false);
3096 ICE_FLOW_SET_HDRS(segs, flow_hdr);
3098 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3099 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3100 return ICE_ERR_PARAM;
3102 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3103 if (val && !ice_is_pow2(val))
3106 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3107 if (val && !ice_is_pow2(val))
3114 * ice_rem_vsi_rss_list - remove VSI from RSS list
3115 * @hw: pointer to the hardware structure
3116 * @vsi_handle: software VSI handle
3118 * Remove the VSI from all RSS configurations in the list.
3120 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3122 struct ice_rss_cfg *r, *tmp;
3124 if (LIST_EMPTY(&hw->rss_list_head))
3127 ice_acquire_lock(&hw->rss_locks);
3128 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3129 ice_rss_cfg, l_entry) {
3130 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3131 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3132 LIST_DEL(&r->l_entry);
3136 ice_release_lock(&hw->rss_locks);
3140 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3141 * @hw: pointer to the hardware structure
3142 * @vsi_handle: software VSI handle
3144 * This function will iterate through all flow profiles and disassociate
3145 * the VSI from that profile. If the flow profile has no VSIs it will
3148 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3150 const enum ice_block blk = ICE_BLK_RSS;
3151 struct ice_flow_prof *p, *t;
3152 enum ice_status status = ICE_SUCCESS;
3154 if (!ice_is_vsi_valid(hw, vsi_handle))
3155 return ICE_ERR_PARAM;
3157 if (LIST_EMPTY(&hw->fl_profs[blk]))
3160 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3161 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3163 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3164 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3168 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3169 status = ice_flow_rem_prof_sync(hw, blk, p);
3175 ice_release_lock(&hw->fl_profs_locks[blk]);
3181 * ice_rem_rss_list - remove RSS configuration from list
3182 * @hw: pointer to the hardware structure
3183 * @vsi_handle: software VSI handle
3184 * @prof: pointer to flow profile
3186 * Assumption: lock has already been acquired for RSS list
3189 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3191 struct ice_rss_cfg *r, *tmp;
3193 /* Search for RSS hash fields associated to the VSI that match the
3194 * hash configurations associated to the flow profile. If found
3195 * remove from the RSS entry list of the VSI context and delete entry.
3197 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3198 ice_rss_cfg, l_entry) {
3199 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3200 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3201 ice_clear_bit(vsi_handle, r->vsis);
3202 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3203 LIST_DEL(&r->l_entry);
3212 * ice_add_rss_list - add RSS configuration to list
3213 * @hw: pointer to the hardware structure
3214 * @vsi_handle: software VSI handle
3215 * @prof: pointer to flow profile
3217 * Assumption: lock has already been acquired for RSS list
3219 static enum ice_status
3220 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3222 struct ice_rss_cfg *r, *rss_cfg;
3224 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3225 ice_rss_cfg, l_entry)
3226 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3227 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3228 ice_set_bit(vsi_handle, r->vsis);
3232 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3234 return ICE_ERR_NO_MEMORY;
3236 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3237 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3238 rss_cfg->symm = prof->cfg.symm;
3239 ice_set_bit(vsi_handle, rss_cfg->vsis);
3241 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Number of segments for non-tunneled vs tunneled RSS lookups */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3267 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3269 u32 s = ((src % 4) << 3); /* byte shift */
3270 u32 v = dst | 0x80; /* value to program */
3271 u8 i = src / 4; /* register index */
3274 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3275 reg = (reg & ~(0xff << s)) | (v << s);
3276 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3280 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3283 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3286 for (i = 0; i < len; i++) {
3287 ice_rss_config_xor_word(hw, prof_id,
3288 /* Yes, field vector in GLQF_HSYMM and
3289 * GLQF_HINSET is inversed!
3291 fv_last_word - (src + i),
3292 fv_last_word - (dst + i));
3293 ice_rss_config_xor_word(hw, prof_id,
3294 fv_last_word - (dst + i),
3295 fv_last_word - (src + i));
3300 ice_rss_update_symm(struct ice_hw *hw,
3301 struct ice_flow_prof *prof)
3303 struct ice_prof_map *map;
3306 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3307 prof_id = map->prof_id;
3309 /* clear to default */
3310 for (m = 0; m < 6; m++)
3311 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3312 if (prof->cfg.symm) {
3313 struct ice_flow_seg_info *seg =
3314 &prof->segs[prof->segs_cnt - 1];
3316 struct ice_flow_seg_xtrct *ipv4_src =
3317 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3318 struct ice_flow_seg_xtrct *ipv4_dst =
3319 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3320 struct ice_flow_seg_xtrct *ipv6_src =
3321 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3322 struct ice_flow_seg_xtrct *ipv6_dst =
3323 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3325 struct ice_flow_seg_xtrct *tcp_src =
3326 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3327 struct ice_flow_seg_xtrct *tcp_dst =
3328 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3330 struct ice_flow_seg_xtrct *udp_src =
3331 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3332 struct ice_flow_seg_xtrct *udp_dst =
3333 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3335 struct ice_flow_seg_xtrct *sctp_src =
3336 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3337 struct ice_flow_seg_xtrct *sctp_dst =
3338 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3341 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3342 ice_rss_config_xor(hw, prof_id,
3343 ipv4_src->idx, ipv4_dst->idx, 2);
3346 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3347 ice_rss_config_xor(hw, prof_id,
3348 ipv6_src->idx, ipv6_dst->idx, 8);
3351 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3352 ice_rss_config_xor(hw, prof_id,
3353 tcp_src->idx, tcp_dst->idx, 1);
3356 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3357 ice_rss_config_xor(hw, prof_id,
3358 udp_src->idx, udp_dst->idx, 1);
3361 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3362 ice_rss_config_xor(hw, prof_id,
3363 sctp_src->idx, sctp_dst->idx, 1);
3368 * ice_add_rss_cfg_sync - add an RSS configuration
3369 * @hw: pointer to the hardware structure
3370 * @vsi_handle: software VSI handle
3371 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3372 * @addl_hdrs: protocol header fields
3373 * @segs_cnt: packet segment count
3374 * @symm: symmetric hash enable/disable
3376 * Assumption: lock has already been acquired for RSS list
3378 static enum ice_status
3379 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3380 u32 addl_hdrs, u8 segs_cnt, bool symm)
3382 const enum ice_block blk = ICE_BLK_RSS;
3383 struct ice_flow_prof *prof = NULL;
3384 struct ice_flow_seg_info *segs;
3385 enum ice_status status;
3387 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3388 return ICE_ERR_PARAM;
3390 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3393 return ICE_ERR_NO_MEMORY;
3395 /* Construct the packet segment info from the hashed fields */
3396 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3401 /* Search for a flow profile that has matching headers, hash fields
3402 * and has the input VSI associated to it. If found, no further
3403 * operations required and exit.
3405 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3407 ICE_FLOW_FIND_PROF_CHK_FLDS |
3408 ICE_FLOW_FIND_PROF_CHK_VSI);
3410 if (prof->cfg.symm == symm)
3412 prof->cfg.symm = symm;
3416 /* Check if a flow profile exists with the same protocol headers and
3417 * associated with the input VSI. If so disasscociate the VSI from
3418 * this profile. The VSI will be added to a new profile created with
3419 * the protocol header and new hash field configuration.
3421 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3422 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3424 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3426 ice_rem_rss_list(hw, vsi_handle, prof);
3430 /* Remove profile if it has no VSIs associated */
3431 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3432 status = ice_flow_rem_prof(hw, blk, prof->id);
3438 /* Search for a profile that has same match fields only. If this
3439 * exists then associate the VSI to this profile.
3441 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3443 ICE_FLOW_FIND_PROF_CHK_FLDS);
3445 if (prof->cfg.symm == symm) {
3446 status = ice_flow_assoc_prof(hw, blk, prof,
3449 status = ice_add_rss_list(hw, vsi_handle,
3452 /* if a profile exist but with different symmetric
3453 * requirement, just return error.
3455 status = ICE_ERR_NOT_SUPPORTED;
3460 /* Create a new flow profile with generated profile and packet
3461 * segment information.
3463 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3464 ICE_FLOW_GEN_PROFID(hashed_flds,
3465 segs[segs_cnt - 1].hdrs,
3467 segs, segs_cnt, NULL, 0, &prof);
3471 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3472 /* If association to a new flow profile failed then this profile can
3476 ice_flow_rem_prof(hw, blk, prof->id);
3480 status = ice_add_rss_list(hw, vsi_handle, prof);
3482 prof->cfg.symm = symm;
3485 ice_rss_update_symm(hw, prof);
3493 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3494 * @hw: pointer to the hardware structure
3495 * @vsi_handle: software VSI handle
3496 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3497 * @addl_hdrs: protocol header fields
3498 * @symm: symmetric hash enable/disable
3500 * This function will generate a flow profile based on fields associated with
3501 * the input fields to hash on, the flow type and use the VSI number to add
3502 * a flow entry to the profile.
3505 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3506 u32 addl_hdrs, bool symm)
3508 enum ice_status status;
3510 if (hashed_flds == ICE_HASH_INVALID ||
3511 !ice_is_vsi_valid(hw, vsi_handle))
3512 return ICE_ERR_PARAM;
3514 ice_acquire_lock(&hw->rss_locks);
3515 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3516 ICE_RSS_OUTER_HEADERS, symm);
3518 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3519 addl_hdrs, ICE_RSS_INNER_HEADERS,
3521 ice_release_lock(&hw->rss_locks);
3527 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3528 * @hw: pointer to the hardware structure
3529 * @vsi_handle: software VSI handle
3530 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3531 * @addl_hdrs: Protocol header fields within a packet segment
3532 * @segs_cnt: packet segment count
3534 * Assumption: lock has already been acquired for RSS list
3536 static enum ice_status
3537 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3538 u32 addl_hdrs, u8 segs_cnt)
3540 const enum ice_block blk = ICE_BLK_RSS;
3541 struct ice_flow_seg_info *segs;
3542 struct ice_flow_prof *prof;
3543 enum ice_status status;
3545 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3548 return ICE_ERR_NO_MEMORY;
3550 /* Construct the packet segment info from the hashed fields */
3551 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3556 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3558 ICE_FLOW_FIND_PROF_CHK_FLDS);
3560 status = ICE_ERR_DOES_NOT_EXIST;
3564 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3568 /* Remove RSS configuration from VSI context before deleting
3571 ice_rem_rss_list(hw, vsi_handle, prof);
3573 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3574 status = ice_flow_rem_prof(hw, blk, prof->id);
3582 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3583 * @hw: pointer to the hardware structure
3584 * @vsi_handle: software VSI handle
3585 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3586 * @addl_hdrs: Protocol header fields within a packet segment
3588 * This function will lookup the flow profile based on the input
3589 * hash field bitmap, iterate through the profile entry list of
3590 * that profile and find entry associated with input VSI to be
3591 * removed. Calls are made to underlying flow apis which will in
3592 * turn build or update buffers for RSS XLT1 section.
3595 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3598 enum ice_status status;
3600 if (hashed_flds == ICE_HASH_INVALID ||
3601 !ice_is_vsi_valid(hw, vsi_handle))
3602 return ICE_ERR_PARAM;
3604 ice_acquire_lock(&hw->rss_locks);
3605 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3606 ICE_RSS_OUTER_HEADERS);
3608 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3609 addl_hdrs, ICE_RSS_INNER_HEADERS);
3610 ice_release_lock(&hw->rss_locks);
3616 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3617 * @hw: pointer to the hardware structure
3618 * @vsi_handle: software VSI handle
3620 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3622 enum ice_status status = ICE_SUCCESS;
3623 struct ice_rss_cfg *r;
3625 if (!ice_is_vsi_valid(hw, vsi_handle))
3626 return ICE_ERR_PARAM;
3628 ice_acquire_lock(&hw->rss_locks);
3629 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3630 ice_rss_cfg, l_entry) {
3631 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3632 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3635 ICE_RSS_OUTER_HEADERS,
3639 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3642 ICE_RSS_INNER_HEADERS,
3648 ice_release_lock(&hw->rss_locks);
3654 * ice_get_rss_cfg - returns hashed fields for the given header types
3655 * @hw: pointer to the hardware structure
3656 * @vsi_handle: software VSI handle
3657 * @hdrs: protocol header type
3659 * This function will return the match fields of the first instance of flow
3660 * profile having the given header types and containing input VSI
3662 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3664 struct ice_rss_cfg *r, *rss_cfg = NULL;
3666 /* verify if the protocol header is non zero and VSI is valid */
3667 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3668 return ICE_HASH_INVALID;
3670 ice_acquire_lock(&hw->rss_locks);
3671 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3672 ice_rss_cfg, l_entry)
3673 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3674 r->packet_hdr == hdrs) {
3678 ice_release_lock(&hw->rss_locks);
3680 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;