1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_common.h"
/* Size of known protocol header fields */
/* NOTE: sizes are in BYTES; the ICE_FLOW_FLD_INFO() macros below convert
 * them to bits via BITS_PER_BYTE.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
31 /* Describe properties of a protocol header field */
32 struct ice_flow_field_info {
33 enum ice_flow_seg_hdr hdr;
34 s16 off; /* Offset from start of a protocol header, in bits */
35 u16 size; /* Size of fields in bits */
36 u16 mask; /* 16-bit mask for field */
/* Initializer for a struct ice_flow_field_info entry with no mask.
 * Offset/size arguments are in bytes and converted to bits here.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}
/* Initializer for a struct ice_flow_field_info entry carrying a 16-bit
 * mask (used for sub-byte fields such as DSCP, TTL, protocol, QFI).
 */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
53 /* Table containing properties of supported protocol header fields */
55 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
57 /* ICE_FLOW_FIELD_IDX_ETH_DA */
58 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
59 /* ICE_FLOW_FIELD_IDX_ETH_SA */
60 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
61 /* ICE_FLOW_FIELD_IDX_S_VLAN */
62 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
63 /* ICE_FLOW_FIELD_IDX_C_VLAN */
64 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
65 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
66 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
68 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
69 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
71 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
72 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
74 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
75 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
76 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
77 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
78 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
80 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
81 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
82 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
83 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
84 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
86 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
87 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
88 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
89 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
90 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
91 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
92 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
93 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
97 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
99 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
101 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
103 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
105 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
107 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
110 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
112 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
113 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
114 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
115 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
116 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
118 /* ICE_FLOW_FIELD_IDX_ARP_OP */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
121 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
122 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
123 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
124 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
126 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
129 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
130 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
131 ICE_FLOW_FLD_SZ_GTP_TEID),
132 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
134 ICE_FLOW_FLD_SZ_GTP_TEID),
135 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
137 ICE_FLOW_FLD_SZ_GTP_TEID),
138 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
139 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
140 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
141 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
143 ICE_FLOW_FLD_SZ_GTP_TEID),
144 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
146 ICE_FLOW_FLD_SZ_GTP_TEID),
148 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
149 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
150 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
152 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
154 ICE_FLOW_FLD_SZ_PFCP_SEID),
156 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
158 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
160 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
162 ICE_FLOW_FLD_SZ_ESP_SPI),
164 /* ICE_FLOW_FIELD_IDX_AH_SPI */
165 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
166 ICE_FLOW_FLD_SZ_AH_SPI),
168 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
169 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
170 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
173 /* Bitmaps indicating relevant packet types for a particular protocol header
175 * Packet types for packets with an Outer/First/Single MAC header
177 static const u32 ice_ptypes_mac_ofos[] = {
178 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
179 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
180 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
181 0x00000000, 0x00000000, 0x00000000, 0x00000000,
182 0x00000000, 0x00000000, 0x00000000, 0x00000000,
183 0x00000000, 0x00000000, 0x00000000, 0x00000000,
184 0x00000000, 0x00000000, 0x00000000, 0x00000000,
185 0x00000000, 0x00000000, 0x00000000, 0x00000000,
188 /* Packet types for packets with an Innermost/Last MAC VLAN header */
189 static const u32 ice_ptypes_macvlan_il[] = {
190 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
191 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
192 0x00000000, 0x00000000, 0x00000000, 0x00000000,
193 0x00000000, 0x00000000, 0x00000000, 0x00000000,
194 0x00000000, 0x00000000, 0x00000000, 0x00000000,
195 0x00000000, 0x00000000, 0x00000000, 0x00000000,
196 0x00000000, 0x00000000, 0x00000000, 0x00000000,
197 0x00000000, 0x00000000, 0x00000000, 0x00000000,
200 /* Packet types for packets with an Outer/First/Single IPv4 header */
201 static const u32 ice_ptypes_ipv4_ofos[] = {
202 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
203 0x00000000, 0x00000155, 0x00000000, 0x00000000,
204 0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 0x00000000, 0x00000000, 0x00000000, 0x00000000,
208 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 0x00000000, 0x00000000, 0x00000000, 0x00000000,
212 /* Packet types for packets with an Innermost/Last IPv4 header */
213 static const u32 ice_ptypes_ipv4_il[] = {
214 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
215 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
220 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 0x00000000, 0x00000000, 0x00000000, 0x00000000,
224 /* Packet types for packets with an Outer/First/Single IPv6 header */
225 static const u32 ice_ptypes_ipv6_ofos[] = {
226 0x00000000, 0x00000000, 0x77000000, 0x10002000,
227 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
228 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 /* Packet types for packets with an Innermost/Last IPv6 header */
237 static const u32 ice_ptypes_ipv6_il[] = {
238 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
239 0x00000770, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 /* Packet types for packets with an Outermost/First ARP header */
249 static const u32 ice_ptypes_arp_of[] = {
250 0x00000800, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
260 /* UDP Packet types for non-tunneled packets or tunneled
261 * packets with inner UDP.
263 static const u32 ice_ptypes_udp_il[] = {
264 0x81000000, 0x20204040, 0x04000010, 0x80810102,
265 0x00000040, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00410000, 0x90842000, 0x00000007,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 /* Packet types for packets with an Innermost/Last TCP header */
275 static const u32 ice_ptypes_tcp_il[] = {
276 0x04000000, 0x80810102, 0x10000040, 0x02040408,
277 0x00000102, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00820000, 0x21084000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 /* Packet types for packets with an Innermost/Last SCTP header */
287 static const u32 ice_ptypes_sctp_il[] = {
288 0x08000000, 0x01020204, 0x20000081, 0x04080810,
289 0x00000204, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x01040000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 /* Packet types for packets with an Outermost/First ICMP header */
299 static const u32 ice_ptypes_icmp_of[] = {
300 0x10000000, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 /* Packet types for packets with an Innermost/Last ICMP header */
311 static const u32 ice_ptypes_icmp_il[] = {
312 0x00000000, 0x02040408, 0x40000102, 0x08101020,
313 0x00000408, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x42108000, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 /* Packet types for packets with an Outermost/First GRE header */
323 static const u32 ice_ptypes_gre_of[] = {
324 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
325 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 /* Packet types for packets with an Innermost/Last MAC header */
335 static const u32 ice_ptypes_mac_il[] = {
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 /* Packet types for GTPC */
347 static const u32 ice_ptypes_gtpc[] = {
348 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000180, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 /* Packet types for GTPC with TEID */
359 static const u32 ice_ptypes_gtpc_tid[] = {
360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000060, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 0x00000000, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 /* Packet types for GTPU */
371 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
372 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
373 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
374 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
375 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
376 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
377 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
378 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
379 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
380 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
381 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
382 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
383 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
384 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
385 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
386 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
387 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
388 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
389 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
390 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
391 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
394 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
395 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
396 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
397 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
398 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
399 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
400 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
401 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
402 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
403 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
404 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
405 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
406 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
407 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
408 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
409 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
410 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
411 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
412 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
413 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
414 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
417 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
418 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
419 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
420 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
421 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
422 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
423 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
424 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
425 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
426 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
427 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
428 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
429 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
430 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
431 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
432 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
433 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
434 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
435 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
436 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
437 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
440 static const u32 ice_ptypes_gtpu[] = {
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 /* Packet types for pppoe */
452 static const u32 ice_ptypes_pppoe[] = {
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 0x00000000, 0x00000000, 0x00000000, 0x00000000,
459 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 /* Packet types for packets with PFCP NODE header */
464 static const u32 ice_ptypes_pfcp_node[] = {
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x80000000, 0x00000002,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 0x00000000, 0x00000000, 0x00000000, 0x00000000,
471 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 /* Packet types for packets with PFCP SESSION header */
476 static const u32 ice_ptypes_pfcp_session[] = {
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000005,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 /* Packet types for l2tpv3 */
488 static const u32 ice_ptypes_l2tpv3[] = {
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000300,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 /* Packet types for esp */
500 static const u32 ice_ptypes_esp[] = {
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000003, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
506 0x00000000, 0x00000000, 0x00000000, 0x00000000,
507 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 /* Packet types for ah */
512 static const u32 ice_ptypes_ah[] = {
513 0x00000000, 0x00000000, 0x00000000, 0x00000000,
514 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
515 0x00000000, 0x00000000, 0x00000000, 0x00000000,
516 0x00000000, 0x00000000, 0x00000000, 0x00000000,
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
518 0x00000000, 0x00000000, 0x00000000, 0x00000000,
519 0x00000000, 0x00000000, 0x00000000, 0x00000000,
520 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 /* Packet types for packets with NAT_T ESP header */
524 static const u32 ice_ptypes_nat_t_esp[] = {
525 0x00000000, 0x00000000, 0x00000000, 0x00000000,
526 0x00000000, 0x00000030, 0x00000000, 0x00000000,
527 0x00000000, 0x00000000, 0x00000000, 0x00000000,
528 0x00000000, 0x00000000, 0x00000000, 0x00000000,
529 0x00000000, 0x00000000, 0x00000000, 0x00000000,
530 0x00000000, 0x00000000, 0x00000000, 0x00000000,
531 0x00000000, 0x00000000, 0x00000000, 0x00000000,
532 0x00000000, 0x00000000, 0x00000000, 0x00000000,
535 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
536 0x00000846, 0x00000000, 0x00000000, 0x00000000,
537 0x00000000, 0x00000000, 0x00000000, 0x00000000,
538 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
539 0x00000000, 0x00000000, 0x00000000, 0x00000000,
540 0x00000000, 0x00000000, 0x00000000, 0x00000000,
541 0x00000000, 0x00000000, 0x00000000, 0x00000000,
542 0x00000000, 0x00000000, 0x00000000, 0x00000000,
543 0x00000000, 0x00000000, 0x00000000, 0x00000000,
546 /* Manage parameters and info. used during the creation of a flow profile */
547 struct ice_flow_prof_params {
549 u16 entry_length; /* # of bytes formatted entry will require */
551 struct ice_flow_prof *prof;
553 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
554 * This will give us the direction flags.
556 struct ice_fv_word es[ICE_MAX_FV_WORDS];
557 /* attributes can be used to add attributes to a particular PTYPE */
558 const struct ice_ptype_attributes *attr;
561 u16 mask[ICE_MAX_FV_WORDS];
562 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Tunnel/encapsulation headers that may appear on inner RSS segments */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* All recognized L2 headers */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All recognized L3 headers; at most one may be set per segment
 * (enforced by ice_flow_val_hdrs below)
 */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All recognized L4 headers; at most one may be set per segment */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
582 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
583 * @segs: array of one or more packet segments that describe the flow
584 * @segs_cnt: number of packet segments provided
586 static enum ice_status
587 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
591 for (i = 0; i < segs_cnt; i++) {
592 /* Multiple L3 headers */
593 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
594 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
595 return ICE_ERR_PARAM;
597 /* Multiple L4 headers */
598 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
599 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
600 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options */
/* Used by ice_flow_calc_seg_sz() to size a formatted packet segment;
 * all values are in bytes.
 */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
618 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
619 * @params: information about the flow to be processed
620 * @seg: index of packet segment whose header size is to be determined
622 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
627 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
628 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
631 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
632 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
633 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
634 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
635 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
636 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
637 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
638 /* A L3 header is required if L4 is specified */
642 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
643 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
644 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
645 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
646 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
647 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
648 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
649 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
	struct ice_flow_prof *prof;
	/* NOTE(review): this view of the file appears truncated — the opening
	 * brace, several local declarations (e.g. the loop index and "hdrs"),
	 * and a number of closing braces are not visible here. The comments
	 * below describe only the visible code; confirm against the full file.
	 */
	/* Start from "all packet types set" and AND away with each header's
	 * bitmap so only ptypes matching every requested header remain.
	 */
	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
	for (i = 0; i < params->prof->segs_cnt; i++) {
		const ice_bitmap_t *src;
		hdrs = prof->segs[i].hdrs;
		/* Outermost segment (i == 0) selects the *_ofos tables, inner
		 * segments the *_il tables.
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
				(const ice_bitmap_t *)ice_ptypes_mac_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_arp_of,
		if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
			if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
				src = (const ice_bitmap_t *)ice_ptypes_udp_il;
				ice_and_bitmap(params->ptypes,
			} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
				ice_and_bitmap(params->ptypes, params->ptypes,
					       (const ice_bitmap_t *)
			} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
				src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
			if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
				src = (const ice_bitmap_t *)ice_ptypes_udp_il;
				ice_and_bitmap(params->ptypes,
			} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
				ice_and_bitmap(params->ptypes, params->ptypes,
					       (const ice_bitmap_t *)
			} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
				src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
		if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
		/* Tunnel/encapsulation headers: exactly one branch applies */
		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
				(const ice_bitmap_t *)ice_ptypes_icmp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			src = (const ice_bitmap_t *)ice_ptypes_gre_of;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
			/* Attributes for GTP packet with downlink */
			params->attr = ice_attr_gtpu_down;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
			/* Attributes for GTP packet with uplink */
			params->attr = ice_attr_gtpu_up;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
			/* Attributes for GTP packet with Extension Header */
			params->attr = ice_attr_gtpu_eh;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
			src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
			src = (const ice_bitmap_t *)ice_ptypes_ah;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
			/* NODE and SESSION share a UDP port; pick the bitmap
			 * matching the requested PFCP message class.
			 */
			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
				(const ice_bitmap_t *)ice_ptypes_pfcp_node;
				(const ice_bitmap_t *)ice_ptypes_pfcp_session;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
			/* else: PFCP not requested — exclude both PFCP
			 * ptype sets from the result
			 */
			src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);
			src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);
839 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
840 * @hw: pointer to the HW struct
841 * @params: information about the flow to be processed
842 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
844 * This function will allocate an extraction sequence entries for a DWORD size
845 * chunk of the packet flags.
847 static enum ice_status
848 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
849 struct ice_flow_prof_params *params,
850 enum ice_flex_mdid_pkt_flags flags)
/* NOTE(review): this extract appears to omit several original lines here
 * (opening brace, the local 'idx' declaration, the 'else' before the
 * non-reversed index assignment, and the trailing es_cnt increment /
 * return) -- confirm against the complete file before relying on it.
 */
852 u8 fv_words = hw->blk[params->blk].es.fvw;
855 /* Make sure the number of extraction sequence entries required does not
856 * exceed the block's capacity.
858 if (params->es_cnt >= fv_words)
859 return ICE_ERR_MAX_LIMIT;
861 /* some blocks require a reversed field vector layout */
862 if (hw->blk[params->blk].es.reverse)
863 idx = fv_words - params->es_cnt - 1;
865 idx = params->es_cnt;
/* Packet flags are metadata, not a protocol header: use the META protocol
 * ID and let the 'flags' enum value double as the extraction offset.
 */
867 params->es[idx].prot_id = ICE_PROT_META_ID;
868 params->es[idx].off = flags;
875 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
876 * @hw: pointer to the HW struct
877 * @params: information about the flow to be processed
878 * @seg: packet segment index of the field to be extracted
879 * @fld: ID of field to be extracted
880 * @match: bitfield of all fields
882 * This function determines the protocol ID, offset, and size of the given
883 * field. It then allocates one or more extraction sequence entries for the
884 * given field, and fill the entries with protocol ID and offset information.
886 static enum ice_status
887 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
888 u8 seg, enum ice_flow_field fld, u64 match)
/* NOTE(review): the extract drops lines between the declarations below and
 * the first 'case' label (opening brace, u16 sib_mask/mask/off/adj locals,
 * and the 'switch (fld)' header), plus the 'break;' after each case arm.
 * The code is kept byte-identical; verify against the full source.
 */
890 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
891 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
892 u8 fv_words = hw->blk[params->blk].es.fvw;
893 struct ice_flow_fld_info *flds;
894 u16 cnt, ese_bits, i;
900 flds = params->prof->segs[seg].fields;
/* Map the abstract flow field to a HW protocol ID; segment 0 selects the
 * outer/first-header variant, any later segment the inner variant.
 */
903 case ICE_FLOW_FIELD_IDX_ETH_DA:
904 case ICE_FLOW_FIELD_IDX_ETH_SA:
905 case ICE_FLOW_FIELD_IDX_S_VLAN:
906 case ICE_FLOW_FIELD_IDX_C_VLAN:
907 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
909 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
910 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
912 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
913 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
915 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
916 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
918 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
919 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
920 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
922 /* TTL and PROT share the same extraction seq. entry.
923 * Each is considered a sibling to the other in terms of sharing
924 * the same extraction sequence entry.
926 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
927 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
928 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
929 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
931 /* If the sibling field is also included, that field's
932 * mask needs to be included.
934 if (match & BIT(sib))
935 sib_mask = ice_flds_info[sib].mask;
937 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
938 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
939 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
941 /* TTL and PROT share the same extraction seq. entry.
942 * Each is considered a sibling to the other in terms of sharing
943 * the same extraction sequence entry.
945 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
946 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
947 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
948 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
950 /* If the sibling field is also included, that field's
951 * mask needs to be included.
953 if (match & BIT(sib))
954 sib_mask = ice_flds_info[sib].mask;
956 case ICE_FLOW_FIELD_IDX_IPV4_SA:
957 case ICE_FLOW_FIELD_IDX_IPV4_DA:
958 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
960 case ICE_FLOW_FIELD_IDX_IPV6_SA:
961 case ICE_FLOW_FIELD_IDX_IPV6_DA:
962 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
964 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
965 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
966 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
967 prot_id = ICE_PROT_TCP_IL;
969 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
970 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
971 prot_id = ICE_PROT_UDP_IL_OR_S;
973 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
974 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
975 prot_id = ICE_PROT_SCTP_IL;
977 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
978 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
979 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
980 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
981 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
982 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
983 /* GTP is accessed through UDP OF protocol */
984 prot_id = ICE_PROT_UDP_OF;
986 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
987 prot_id = ICE_PROT_PPPOE;
989 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
990 prot_id = ICE_PROT_UDP_IL_OR_S;
992 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
993 prot_id = ICE_PROT_L2TPV3;
995 case ICE_FLOW_FIELD_IDX_ESP_SPI:
996 prot_id = ICE_PROT_ESP_F;
998 case ICE_FLOW_FIELD_IDX_AH_SPI:
999 prot_id = ICE_PROT_ESP_2;
1001 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1002 prot_id = ICE_PROT_UDP_IL_OR_S;
1004 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1005 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1006 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1007 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1008 case ICE_FLOW_FIELD_IDX_ARP_OP:
1009 prot_id = ICE_PROT_ARP_OF;
1011 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1012 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1013 /* ICMP type and code share the same extraction seq. entry */
1014 prot_id = (params->prof->segs[seg].hdrs &
1015 ICE_FLOW_SEG_HDR_IPV4) ?
1016 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1017 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1018 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1019 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1021 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1022 prot_id = ICE_PROT_GRE_OF;
/* Unrecognized field index: no extraction possible (default arm). */
1025 return ICE_ERR_NOT_IMPL;
1028 /* Each extraction sequence entry is a word in size, and extracts a
1029 * word-aligned offset from a protocol header.
1031 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record where/how this field is extracted: word-aligned byte offset,
 * bit displacement within that word, starting ES index, and match mask.
 * NOTE(review): 'adj' below is declared on a line not visible in this
 * extract -- presumably a bit adjustment for sub-byte fields; confirm.
 */
1033 flds[fld].xtrct.prot_id = prot_id;
1034 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1035 ICE_FLOW_FV_EXTRACT_SZ;
1036 flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1037 flds[fld].xtrct.idx = params->es_cnt;
1038 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1040 /* Adjust the next field-entry index after accommodating the number of
1041 * entries this field consumes
1043 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1044 ice_flds_info[fld].size, ese_bits);
1046 /* Fill in the extraction sequence entries needed for this field */
1047 off = flds[fld].xtrct.off;
1048 mask = flds[fld].xtrct.mask;
1049 for (i = 0; i < cnt; i++) {
1050 /* Only consume an extraction sequence entry if there is no
1051 * sibling field associated with this field or the sibling entry
1052 * already extracts the word shared with this field.
1054 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1055 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1056 flds[sib].xtrct.off != off) {
1059 /* Make sure the number of extraction sequence required
1060 * does not exceed the block's capability
1062 if (params->es_cnt >= fv_words)
1063 return ICE_ERR_MAX_LIMIT;
1065 /* some blocks require a reversed field vector layout */
1066 if (hw->blk[params->blk].es.reverse)
1067 idx = fv_words - params->es_cnt - 1;
1069 idx = params->es_cnt;
1071 params->es[idx].prot_id = prot_id;
1072 params->es[idx].off = off;
/* Combine this field's mask with the sibling's so one shared ES
 * word matches both fields.
 */
1073 params->mask[idx] = mask | sib_mask;
1077 off += ICE_FLOW_FV_EXTRACT_SZ;
1084 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1085 * @hw: pointer to the HW struct
1086 * @params: information about the flow to be processed
1087 * @seg: index of packet segment whose raw fields are to be extracted
1089 static enum ice_status
1090 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* NOTE(review): the 'u8 seg' parameter line, opening brace, and the local
 * declarations (hdrs_sz, fv_words, i/j/cnt/off/idx) are not visible in
 * this extract; code below is kept byte-identical.
 */
1097 if (!params->prof->segs[seg].raws_cnt)
1100 if (params->prof->segs[seg].raws_cnt >
1101 ARRAY_SIZE(params->prof->segs[seg].raws))
1102 return ICE_ERR_MAX_LIMIT;
1104 /* Offsets within the segment headers are not supported */
1105 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1107 return ICE_ERR_PARAM;
1109 fv_words = hw->blk[params->blk].es.fvw;
1111 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1112 struct ice_flow_seg_fld_raw *raw;
/* NOTE(review): '¶ms' below looks like an HTML-escaping artifact of
 * '&params' -- restore when reconciling with the original source.
 */
1115 raw = ¶ms->prof->segs[seg].raws[i];
1117 /* Storing extraction information */
1118 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1119 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1120 ICE_FLOW_FV_EXTRACT_SZ;
1121 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1123 raw->info.xtrct.idx = params->es_cnt;
1125 /* Determine the number of field vector entries this raw field
1128 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1129 (raw->info.src.last * BITS_PER_BYTE),
1130 (ICE_FLOW_FV_EXTRACT_SZ *
1132 off = raw->info.xtrct.off;
1133 for (j = 0; j < cnt; j++) {
1136 /* Make sure the number of extraction sequence required
1137 * does not exceed the block's capability
1139 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1140 params->es_cnt >= ICE_MAX_FV_WORDS)
1141 return ICE_ERR_MAX_LIMIT;
1143 /* some blocks require a reversed field vector layout */
1144 if (hw->blk[params->blk].es.reverse)
1145 idx = fv_words - params->es_cnt - 1;
1147 idx = params->es_cnt;
1149 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1150 params->es[idx].off = off;
1152 off += ICE_FLOW_FV_EXTRACT_SZ;
1160 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1161 * @hw: pointer to the HW struct
1162 * @params: information about the flow to be processed
1164 * This function iterates through all matched fields in the given segments, and
1165 * creates an extraction sequence for the fields.
1167 static enum ice_status
1168 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1169 struct ice_flow_prof_params *params)
/* Walks every matched field of every segment and builds the extraction
 * sequence via ice_flow_xtract_fld()/ice_flow_xtract_raws().
 * NOTE(review): braces, loop variable 'i', the bit-clearing of 'match',
 * and the early-error returns are on lines not visible in this extract.
 */
1171 enum ice_status status = ICE_SUCCESS;
1174 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1177 if (params->blk == ICE_BLK_ACL) {
1178 status = ice_flow_xtract_pkt_flags(hw, params,
1179 ICE_RX_MDID_PKT_FLAGS_15_0);
1184 for (i = 0; i < params->prof->segs_cnt; i++) {
1185 u64 match = params->prof->segs[i].match;
1186 enum ice_flow_field j;
/* Stop early once all matched-field bits have been consumed. */
1188 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1189 const u64 bit = BIT_ULL(j);
1192 status = ice_flow_xtract_fld(hw, params, i, j,
1200 /* Process raw matching bytes */
1201 status = ice_flow_xtract_raws(hw, params, i);
1210 * ice_flow_sel_acl_scen - returns the specific scenario
1211 * @hw: pointer to the hardware structure
1212 * @params: information about the flow to be processed
1214 * This function will return the specific scenario based on the
1215 * params passed to it
1217 static enum ice_status
1218 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1220 /* Find the best-fit scenario for the provided match width */
1221 struct ice_acl_scen *cand_scen = NULL, *scen;
/* NOTE(review): the guard that returns here is presumably
 * 'if (!hw->acl_tbl)' on a line not visible in this extract -- confirm.
 */
1224 return ICE_ERR_DOES_NOT_EXIST;
1226 /* Loop through each scenario and match against the scenario width
1227 * to select the specific scenario
1229 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
/* Best fit = narrowest scenario still wide enough for the entry. */
1230 if (scen->eff_width >= params->entry_length &&
1231 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1234 return ICE_ERR_DOES_NOT_EXIST;
1236 params->prof->cfg.scen = cand_scen;
1242 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1243 * @params: information about the flow to be processed
1245 static enum ice_status
1246 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
/* Lays out each matched field / raw span inside an ACL entry: range-checked
 * fields get a range-checker slot, everything else gets byte-selection
 * indices starting at ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX.
 * NOTE(review): braces, the 'u8 j' declaration, and bit-skip logic sit on
 * lines not visible in this extract; code kept byte-identical.
 */
1248 u16 index, i, range_idx = 0;
1250 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1252 for (i = 0; i < params->prof->segs_cnt; i++) {
1253 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1254 u64 match = seg->match;
1257 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1258 struct ice_flow_fld_info *fld;
1259 const u64 bit = BIT_ULL(j);
1264 fld = &seg->fields[j];
1265 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1267 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1268 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1270 /* Range checking only supported for single
1273 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1275 BITS_PER_BYTE * 2) > 1)
1276 return ICE_ERR_PARAM;
1278 /* Ranges must define low and high values */
1279 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1280 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1281 return ICE_ERR_PARAM;
/* Range fields consume a range-checker slot, not bytes. */
1283 fld->entry.val = range_idx++;
1285 /* Store adjusted byte-length of field for later
1286 * use, taking into account potential
1287 * non-byte-aligned displacement
1289 fld->entry.last = DIVIDE_AND_ROUND_UP
1290 (ice_flds_info[j].size +
1291 (fld->xtrct.disp % BITS_PER_BYTE),
1293 fld->entry.val = index;
1294 index += fld->entry.last;
/* Raw byte matches are appended after the named fields. */
1300 for (j = 0; j < seg->raws_cnt; j++) {
1301 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1303 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1304 raw->info.entry.val = index;
1305 raw->info.entry.last = raw->info.src.last;
1306 index += raw->info.entry.last;
1310 /* Currently only support using the byte selection base, which only
1311 * allows for an effective entry size of 30 bytes. Reject anything
1314 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1315 return ICE_ERR_PARAM;
1317 /* Only 8 range checkers per profile, reject anything trying to use
1320 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1321 return ICE_ERR_PARAM;
1323 /* Store # bytes required for entry for later use */
1324 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1330 * ice_flow_proc_segs - process all packet segments associated with a profile
1331 * @hw: pointer to the HW struct
1332 * @params: information about the flow to be processed
1334 static enum ice_status
1335 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
/* Pipeline: resolve segment headers -> build extraction sequence ->
 * block-specific post-processing (ACL entry layout + scenario selection).
 * NOTE(review): the 'case' labels for the switch arms (FD/RSS vs ACL vs
 * default) are on lines not visible in this extract.
 */
1337 enum ice_status status;
1339 status = ice_flow_proc_seg_hdrs(params);
1343 status = ice_flow_create_xtrct_seq(hw, params);
1347 switch (params->blk) {
1350 status = ICE_SUCCESS;
1353 status = ice_flow_acl_def_entry_frmt(params);
1356 status = ice_flow_sel_acl_scen(hw, params);
/* Unsupported classification block. */
1362 return ICE_ERR_NOT_IMPL;
1368 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1369 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1370 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1373 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1374 * @hw: pointer to the HW struct
1375 * @blk: classification stage
1376 * @dir: flow direction
1377 * @segs: array of one or more packet segments that describe the flow
1378 * @segs_cnt: number of packet segments provided
1379 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1380 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1382 static struct ice_flow_prof *
1383 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1384 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1385 u8 segs_cnt, u16 vsi_handle, u32 conds)
/* Searches hw->fl_profs[blk] under its lock; returns first profile whose
 * direction, segment count/headers (and, per 'conds', matched fields and
 * VSI association) agree with the request, or NULL (initial value below).
 * NOTE(review): braces, 'u8 i', 'continue'/'break' lines, and the final
 * 'return prof;' are not visible in this extract.
 */
1387 struct ice_flow_prof *p, *prof = NULL;
1389 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1390 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1391 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1392 segs_cnt && segs_cnt == p->segs_cnt) {
1395 /* Check for profile-VSI association if specified */
1396 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1397 ice_is_vsi_valid(hw, vsi_handle) &&
1398 !ice_is_bit_set(p->vsis, vsi_handle))
1401 /* Protocol headers must be checked. Matched fields are
1402 * checked if specified.
1404 for (i = 0; i < segs_cnt; i++)
1405 if (segs[i].hdrs != p->segs[i].hdrs ||
1406 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1407 segs[i].match != p->segs[i].match))
1410 /* A match is found if all segments are matched */
1411 if (i == segs_cnt) {
1417 ice_release_lock(&hw->fl_profs_locks[blk]);
1423 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1424 * @hw: pointer to the HW struct
1425 * @blk: classification stage
1426 * @dir: flow direction
1427 * @segs: array of one or more packet segments that describe the flow
1428 * @segs_cnt: number of packet segments provided
1431 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1432 struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Thin wrapper: delegate to the conditional finder, always checking
 * matched fields, then map the result to a profile ID or the invalid
 * sentinel. NOTE(review): the return-type line (presumably u64) precedes
 * this extract.
 */
1434 struct ice_flow_prof *p;
1436 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1437 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1439 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1443 * ice_flow_find_prof_id - Look up a profile with given profile ID
1444 * @hw: pointer to the HW struct
1445 * @blk: classification stage
1446 * @prof_id: unique ID to identify this flow profile
1448 static struct ice_flow_prof *
1449 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
/* Linear scan of the block's profile list by ID. Caller is expected to
 * hold the profile-list lock (no locking visible here).
 * NOTE(review): the 'return p;' / 'return NULL;' lines are not visible
 * in this extract.
 */
1451 struct ice_flow_prof *p;
1453 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1454 if (p->id == prof_id)
1462 * ice_dealloc_flow_entry - Deallocate flow entry memory
1463 * @hw: pointer to the HW struct
1464 * @entry: flow entry to be removed
1467 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
/* Frees the entry's owned buffers (HW entry data, range buffer, actions)
 * and then the entry itself. NOTE(review): the NULL guards around these
 * frees sit on lines not visible in this extract.
 */
1473 ice_free(hw, entry->entry);
1475 if (entry->range_buf) {
1476 ice_free(hw, entry->range_buf);
/* Clear the pointer so a later double-free is harmless. */
1477 entry->range_buf = NULL;
1481 ice_free(hw, entry->acts);
1483 entry->acts_cnt = 0;
1486 ice_free(hw, entry);
1489 #define ICE_ACL_INVALID_SCEN 0x3f
1492 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1493 * @hw: pointer to the hardware structure
1494 * @prof: pointer to flow profile
1495 * @buf: destination buffer function writes partial xtrct sequence to
1497 * returns ICE_SUCCESS if no pf is associated to the given profile
1498 * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1499 * returns other error code for real error
1501 static enum ice_status
1502 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1503 struct ice_aqc_acl_prof_generic_frmt *buf)
/* Queries the ACL profile's generic format into 'buf' (side effect the
 * caller relies on) and classifies usage from the 8 per-PF scenario
 * numbers: all-0 or all-ICE_ACL_INVALID_SCEN => not in use (ICE_SUCCESS),
 * otherwise ICE_ERR_IN_USE.
 * NOTE(review): the 'u8 prof_id' declaration and the early 'return
 * status;' / 'return ICE_SUCCESS;' lines are not visible in this extract.
 */
1505 enum ice_status status;
1508 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1512 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1516 /* If all pf's associated scenarios are all 0 or all
1517 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1518 * not been configured yet.
1520 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1521 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1522 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1523 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1526 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1527 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1528 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1529 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1530 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1531 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1532 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1533 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1536 return ICE_ERR_IN_USE;
1540 * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1541 * @hw: pointer to the hardware structure
1542 * @acts: array of actions to be performed on a match
1543 * @acts_cnt: number of actions
1545 static enum ice_status
1546 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
/* Releases ACL counters referenced by counter-type actions; other action
 * types are ignored. NOTE(review): the 'u8 acts_cnt' parameter line, the
 * 'u8 i' declaration, and the cntrs.first_cntr/last_cntr assignments fed
 * by the two LE16_TO_CPU lines are not visible in this extract.
 */
1551 for (i = 0; i < acts_cnt; i++) {
1552 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1553 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1554 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1555 struct ice_acl_cntrs cntrs;
1556 enum ice_status status;
1558 cntrs.bank = 0; /* Only bank0 for the moment */
1560 LE16_TO_CPU(acts[i].data.acl_act.value);
1562 LE16_TO_CPU(acts[i].data.acl_act.value);
/* Dual counter tracks packets and bytes together. */
1564 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1565 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1567 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1569 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1578 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the Profile
1579 * @hw: pointer to the hardware structure
1580 * @prof: pointer to flow profile
1582 * Disassociate the scenario to the Profile for the PF of the VSI.
1584 static enum ice_status
1585 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
/* Read-modify-write of the ACL profile's generic format: mark this PF's
 * scenario slot invalid, then program the result back.
 * NOTE(review): the 'u8 prof_id' declaration and the early-return-on-error
 * lines after each call are not visible in this extract.
 */
1587 struct ice_aqc_acl_prof_generic_frmt buf;
1588 enum ice_status status = ICE_SUCCESS;
1591 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1593 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1597 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1601 /* Clear scenario for this pf */
1602 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1603 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1609 * ice_flow_rem_entry_sync - Remove a flow entry
1610 * @hw: pointer to the HW struct
1611 * @blk: classification stage
1612 * @entry: flow entry to be removed
1614 static enum ice_status
1615 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1616 struct ice_flow_entry *entry)
/* Removes one flow entry: for ACL, first remove the scenario entry and
 * free any counters, then unlink from the profile list and free memory.
 * Assumes the caller holds the profile's entries lock (locking happens in
 * ice_flow_rem_prof_sync). NOTE(review): the '!entry' guard condition and
 * several brace/return lines are not visible in this extract.
 */
1619 return ICE_ERR_BAD_PTR;
1621 if (blk == ICE_BLK_ACL) {
1622 enum ice_status status;
/* A missing profile/scenario means there is nothing to undo in HW. */
1625 return ICE_ERR_BAD_PTR;
1627 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1628 entry->scen_entry_idx);
1632 /* Checks if we need to release an ACL counter. */
1633 if (entry->acts_cnt && entry->acts)
1634 ice_flow_acl_free_act_cntr(hw, entry->acts,
1638 LIST_DEL(&entry->l_entry);
1640 ice_dealloc_flow_entry(hw, entry);
1646 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1647 * @hw: pointer to the HW struct
1648 * @blk: classification stage
1649 * @dir: flow direction
1650 * @prof_id: unique ID to identify this flow profile
1651 * @segs: array of one or more packet segments that describe the flow
1652 * @segs_cnt: number of packet segments provided
1653 * @acts: array of default actions
1654 * @acts_cnt: number of default actions
1655 * @prof: stores the returned flow profile added
1657 * Assumption: the caller has acquired the lock to the profile list
1659 static enum ice_status
1660 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1661 enum ice_flow_dir dir, u64 prof_id,
1662 struct ice_flow_seg_info *segs, u8 segs_cnt,
1663 struct ice_flow_action *acts, u8 acts_cnt,
1664 struct ice_flow_prof **prof)
/* Builds a software flow profile (deep-copying segments and actions so
 * caller buffers need not persist), processes its segments into an
 * extraction sequence, and programs a matching HW profile. On any failure
 * the cleanup path at the bottom frees the partially built profile.
 * NOTE(review): the 'u8 i' declaration, the '!params.prof' NULL check,
 * 'goto out' lines, and the 'out:' label are not visible in this extract;
 * '¶ms' is an HTML-escaping artifact of '&params'.
 */
1666 struct ice_flow_prof_params params;
1667 enum ice_status status;
1670 if (!prof || (acts_cnt && !acts))
1671 return ICE_ERR_BAD_PTR;
1673 ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
1674 params.prof = (struct ice_flow_prof *)
1675 ice_malloc(hw, sizeof(*params.prof));
1677 return ICE_ERR_NO_MEMORY;
1679 /* initialize extraction sequence to all invalid (0xff) */
1680 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1681 params.es[i].prot_id = ICE_PROT_INVALID;
1682 params.es[i].off = ICE_FV_OFFSET_INVAL;
1686 params.prof->id = prof_id;
1687 params.prof->dir = dir;
1688 params.prof->segs_cnt = segs_cnt;
1690 /* Make a copy of the segments that need to be persistent in the flow
1693 for (i = 0; i < segs_cnt; i++)
1694 ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs),
1695 ICE_NONDMA_TO_NONDMA)
1697 /* Make a copy of the actions that need to be persistent in the flow
1701 params.prof->acts = (struct ice_flow_action *)
1702 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1703 ICE_NONDMA_TO_NONDMA);
1705 if (!params.prof->acts) {
1706 status = ICE_ERR_NO_MEMORY;
1711 status = ice_flow_proc_segs(hw, ¶ms);
1713 ice_debug(hw, ICE_DBG_FLOW,
1714 "Error processing a flow's packet segments\n");
1718 /* Add a HW profile for this flow profile */
1719 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1720 params.attr, params.attr_cnt, params.es,
1723 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
/* Success: hand the fully initialized profile back to the caller. */
1727 INIT_LIST_HEAD(¶ms.prof->entries);
1728 ice_init_lock(¶ms.prof->entries_lock);
1729 *prof = params.prof;
/* Error path: free any copied actions and the profile itself. */
1733 if (params.prof->acts)
1734 ice_free(hw, params.prof->acts);
1735 ice_free(hw, params.prof);
1742 * ice_flow_rem_prof_sync - remove a flow profile
1743 * @hw: pointer to the hardware structure
1744 * @blk: classification stage
1745 * @prof: pointer to flow profile to remove
1747 * Assumption: the caller has acquired the lock to the profile list
1749 static enum ice_status
1750 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1751 struct ice_flow_prof *prof)
/* Tears a profile down in order: entries first (under the entries lock),
 * then ACL-specific HW state (scenario + range checkers), then the HW
 * profile, then the SW bookkeeping and memory.
 * NOTE(review): the 'u8 prof_id' declaration, error-break lines inside the
 * entry loop, and the final 'ice_free(hw, prof); return status;' are not
 * visible in this extract.
 */
1753 enum ice_status status;
1755 /* Remove all remaining flow entries before removing the flow profile */
1756 if (!LIST_EMPTY(&prof->entries)) {
1757 struct ice_flow_entry *e, *t;
1759 ice_acquire_lock(&prof->entries_lock);
1761 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1763 status = ice_flow_rem_entry_sync(hw, blk, e);
1768 ice_release_lock(&prof->entries_lock);
1771 if (blk == ICE_BLK_ACL) {
1772 struct ice_aqc_acl_profile_ranges query_rng_buf;
1773 struct ice_aqc_acl_prof_generic_frmt buf;
1776 /* Deassociate the scenario to the Profile for the PF */
1777 status = ice_flow_acl_disassoc_scen(hw, prof);
1781 /* Clear the range-checker if the profile ID is no longer
1784 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1785 if (status && status != ICE_ERR_IN_USE) {
1787 } else if (!status) {
1788 /* Clear the range-checker value for profile ID */
1789 ice_memset(&query_rng_buf, 0,
1790 sizeof(struct ice_aqc_acl_profile_ranges),
1793 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1798 status = ice_prog_acl_prof_ranges(hw, prof_id,
1799 &query_rng_buf, NULL);
1805 /* Remove all hardware profiles associated with this flow profile */
1806 status = ice_rem_prof(hw, blk, prof->id);
1808 LIST_DEL(&prof->l_entry);
1809 ice_destroy_lock(&prof->entries_lock);
1811 ice_free(hw, prof->acts);
1819 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1820 * @buf: Destination buffer function writes partial xtrct sequence to
1821 * @info: Info about field
1824 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1825 struct ice_flow_fld_info *info)
/* Translates one field's extraction-sequence position into per-byte
 * selections in the ACL profile buffer. NOTE(review): the return-type
 * line (presumably 'static void') and the u16 src/dst/i declarations are
 * not visible in this extract.
 */
1830 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1831 info->xtrct.disp / BITS_PER_BYTE;
1832 dst = info->entry.val;
1833 for (i = 0; i < info->entry.last; i++)
1834 /* HW stores field vector words in LE, convert words back to BE
1835 * so constructed entries will end up in network order
1837 buf->byte_selection[dst++] = src++ ^ 1;
1841 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1842 * @hw: pointer to the hardware structure
1843 * @prof: pointer to flow profile
1845 static enum ice_status
1846 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
/* Programs the profile-wide ACL extraction configuration once (when no PF
 * has configured it yet), then records this PF's scenario number.
 * NOTE(review): several lines are missing from this extract: the 'u8
 * prof_id' and loop-variable declarations, the skip when the profile is
 * already in use, the word_selection right-hand side, and braces.
 */
1848 struct ice_aqc_acl_prof_generic_frmt buf;
1849 struct ice_flow_fld_info *info;
1850 enum ice_status status;
1854 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1856 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1860 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1861 if (status && status != ICE_ERR_IN_USE)
1865 /* Program the profile dependent configuration. This is done
1866 * only once regardless of the number of PFs using that profile
1868 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1870 for (i = 0; i < prof->segs_cnt; i++) {
1871 struct ice_flow_seg_info *seg = &prof->segs[i];
1872 u64 match = seg->match;
1875 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1876 const u64 bit = BIT_ULL(j);
1881 info = &seg->fields[j];
/* Range-checked fields use word selection; plain fields use
 * per-byte selection via the helper below.
 */
1883 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1884 buf.word_selection[info->entry.val] =
1887 ice_flow_acl_set_xtrct_seq_fld(&buf,
1893 for (j = 0; j < seg->raws_cnt; j++) {
1894 info = &seg->raws[j].info;
1895 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Start every PF slot as "no scenario"; only ours is set below. */
1899 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1900 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1904 /* Update the current PF */
1905 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1906 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1912 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1913 * @hw: pointer to the hardware structure
1914 * @blk: classification stage
1915 * @vsi_handle: software VSI handle
1916 * @vsig: target VSI group
1918 * Assumption: the caller has already verified that the VSI to
1919 * be added has the same characteristics as the VSIG and will
1920 * thereby have access to all resources added to that VSIG.
1923 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
/* Validates the handle/block, then adds the HW VSI number to the VSIG
 * under the block's profile-list lock. NOTE(review): the return-type line
 * and the 'u16 vsig' parameter continuation are not visible here.
 */
1926 enum ice_status status;
1928 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1929 return ICE_ERR_PARAM;
1931 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1932 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1934 ice_release_lock(&hw->fl_profs_locks[blk]);
1940 * ice_flow_assoc_prof - associate a VSI with a flow profile
1941 * @hw: pointer to the hardware structure
1942 * @blk: classification stage
1943 * @prof: pointer to flow profile
1944 * @vsi_handle: software VSI handle
1946 * Assumption: the caller has acquired the lock to the profile list
1947 * and the software VSI handle has been validated
1949 static enum ice_status
1950 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1951 struct ice_flow_prof *prof, u16 vsi_handle)
/* Idempotent: a VSI already set in prof->vsis is left alone. For ACL the
 * extraction sequence must be programmed before the HW association.
 * NOTE(review): braces and intermediate return-on-error lines are not
 * visible in this extract.
 */
1953 enum ice_status status = ICE_SUCCESS;
1955 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1956 if (blk == ICE_BLK_ACL) {
1957 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1961 status = ice_add_prof_id_flow(hw, blk,
1962 ice_get_hw_vsi_num(hw,
/* Track the association in the software bitmap only on success. */
1966 ice_set_bit(vsi_handle, prof->vsis);
1968 ice_debug(hw, ICE_DBG_FLOW,
1969 "HW profile add failed, %d\n",
1977 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1978 * @hw: pointer to the hardware structure
1979 * @blk: classification stage
1980 * @prof: pointer to flow profile
1981 * @vsi_handle: software VSI handle
1983 * Assumption: the caller has acquired the lock to the profile list
1984 * and the software VSI handle has been validated
1986 static enum ice_status
1987 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1988 struct ice_flow_prof *prof, u16 vsi_handle)
/* Mirror of ice_flow_assoc_prof: remove the HW association, then clear
 * the bit in prof->vsis on success. A VSI not set in the bitmap is a
 * no-op returning ICE_SUCCESS. NOTE(review): braces and the final
 * 'return status;' are not visible in this extract.
 */
1990 enum ice_status status = ICE_SUCCESS;
1992 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1993 status = ice_rem_prof_id_flow(hw, blk,
1994 ice_get_hw_vsi_num(hw,
1998 ice_clear_bit(vsi_handle, prof->vsis);
2000 ice_debug(hw, ICE_DBG_FLOW,
2001 "HW profile remove failed, %d\n",
2009 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2010 * @hw: pointer to the HW struct
2011 * @blk: classification stage
2012 * @dir: flow direction
2013 * @prof_id: unique ID to identify this flow profile
2014 * @segs: array of one or more packet segments that describe the flow
2015 * @segs_cnt: number of packet segments provided
2016 * @acts: array of default actions
2017 * @acts_cnt: number of default actions
2018 * @prof: stores the returned flow profile added
2021 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2022 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2023 struct ice_flow_action *acts, u8 acts_cnt,
2024 struct ice_flow_prof **prof)
2026 enum ice_status status;
2028 if (segs_cnt > ICE_FLOW_SEG_MAX)
2029 return ICE_ERR_MAX_LIMIT;
2032 return ICE_ERR_PARAM;
2035 return ICE_ERR_BAD_PTR;
2037 status = ice_flow_val_hdrs(segs, segs_cnt);
2041 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2043 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2044 acts, acts_cnt, prof);
2046 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2048 ice_release_lock(&hw->fl_profs_locks[blk]);
2054 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2055 * @hw: pointer to the HW struct
2056 * @blk: the block for which the flow profile is to be removed
2057 * @prof_id: unique ID of the flow profile to be removed
2060 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2062 struct ice_flow_prof *prof;
2063 enum ice_status status;
2065 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2067 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2069 status = ICE_ERR_DOES_NOT_EXIST;
2073 /* prof becomes invalid after the call */
2074 status = ice_flow_rem_prof_sync(hw, blk, prof);
2077 ice_release_lock(&hw->fl_profs_locks[blk]);
2083 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2084 * @hw: pointer to the HW struct
2085 * @blk: classification stage
2086 * @prof_id: the profile ID handle
2087 * @hw_prof_id: pointer to variable to receive the HW profile ID
2090 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2093 struct ice_prof_map *map;
2095 map = ice_search_prof_id(hw, blk, prof_id);
2097 *hw_prof_id = map->prof_id;
2101 return ICE_ERR_DOES_NOT_EXIST;
2105 * ice_flow_find_entry - look for a flow entry using its unique ID
2106 * @hw: pointer to the HW struct
2107 * @blk: classification stage
2108 * @entry_id: unique ID to identify this flow entry
2110 * This function looks for the flow entry with the specified unique ID in all
2111 * flow profiles of the specified classification stage. If the entry is found,
2112 * and it returns the handle to the flow entry. Otherwise, it returns
2113 * ICE_FLOW_ENTRY_ID_INVAL.
2115 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2117 struct ice_flow_entry *found = NULL;
2118 struct ice_flow_prof *p;
2120 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2122 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2123 struct ice_flow_entry *e;
2125 ice_acquire_lock(&p->entries_lock);
2126 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2127 if (e->id == entry_id) {
2131 ice_release_lock(&p->entries_lock);
2137 ice_release_lock(&hw->fl_profs_locks[blk]);
2139 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2143 * ice_flow_acl_check_actions - Checks the acl rule's actions
2144 * @hw: pointer to the hardware structure
2145 * @acts: array of actions to be performed on a match
2146 * @acts_cnt: number of actions
2147 * @cnt_alloc: indicates if a ACL counter has been allocated.
2149 static enum ice_status
2150 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2151 u8 acts_cnt, bool *cnt_alloc)
2153 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2156 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2159 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2160 return ICE_ERR_OUT_OF_RANGE;
2162 for (i = 0; i < acts_cnt; i++) {
2163 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2164 acts[i].type != ICE_FLOW_ACT_DROP &&
2165 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2166 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2169 /* If the caller want to add two actions of the same type, then
2170 * it is considered invalid configuration.
2172 if (ice_test_and_set_bit(acts[i].type, dup_check))
2173 return ICE_ERR_PARAM;
2176 /* Checks if ACL counters are needed. */
2177 for (i = 0; i < acts_cnt; i++) {
2178 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2179 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2180 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2181 struct ice_acl_cntrs cntrs;
2182 enum ice_status status;
2185 cntrs.bank = 0; /* Only bank0 for the moment */
2187 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2188 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2190 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2192 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2195 /* Counter index within the bank */
2196 acts[i].data.acl_act.value =
2197 CPU_TO_LE16(cntrs.first_cntr);
2206 * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
2207 * @fld: number of the given field
2208 * @info: info about field
2209 * @range_buf: range checker configuration buffer
2210 * @data: pointer to a data buffer containing flow entry's match values/masks
2211 * @range: Input/output param indicating which range checkers are being used
2214 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2215 struct ice_aqc_acl_profile_ranges *range_buf,
2216 u8 *data, u8 *range)
2220 /* If not specified, default mask is all bits in field */
2221 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2222 BIT(ice_flds_info[fld].size) - 1 :
2223 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2225 /* If the mask is 0, then we don't need to worry about this input
2226 * range checker value.
2230 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2232 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2233 u8 range_idx = info->entry.val;
2235 range_buf->checker_cfg[range_idx].low_boundary =
2236 CPU_TO_BE16(new_low);
2237 range_buf->checker_cfg[range_idx].high_boundary =
2238 CPU_TO_BE16(new_high);
2239 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2241 /* Indicate which range checker is being used */
2242 *range |= BIT(range_idx);
2247 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
2248 * @fld: number of the given field
2249 * @info: info about the field
2250 * @buf: buffer containing the entry
2251 * @dontcare: buffer containing don't care mask for entry
2252 * @data: pointer to a data buffer containing flow entry's match values/masks
2255 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2256 u8 *dontcare, u8 *data)
2258 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2259 bool use_mask = false;
2262 src = info->src.val;
2263 mask = info->src.mask;
2264 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2265 disp = info->xtrct.disp % BITS_PER_BYTE;
2267 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2270 for (k = 0; k < info->entry.last; k++, dst++) {
2271 /* Add overflow bits from previous byte */
2272 buf[dst] = (tmp_s & 0xff00) >> 8;
2274 /* If mask is not valid, tmp_m is always zero, so just setting
2275 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2276 * overflow bits of mask from prev byte
2278 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2280 /* If there is displacement, last byte will only contain
2281 * displaced data, but there is no more data to read from user
2282 * buffer, so skip so as not to potentially read beyond end of
2285 if (!disp || k < info->entry.last - 1) {
2286 /* Store shifted data to use in next byte */
2287 tmp_s = data[src++] << disp;
2289 /* Add current (shifted) byte */
2290 buf[dst] |= tmp_s & 0xff;
2292 /* Handle mask if valid */
2294 tmp_m = (~data[mask++] & 0xff) << disp;
2295 dontcare[dst] |= tmp_m & 0xff;
2300 /* Fill in don't care bits at beginning of field */
2302 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2303 for (k = 0; k < disp; k++)
2304 dontcare[dst] |= BIT(k);
2307 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2309 /* Fill in don't care bits at end of field */
2311 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2312 info->entry.last - 1;
2313 for (k = end_disp; k < BITS_PER_BYTE; k++)
2314 dontcare[dst] |= BIT(k);
2319 * ice_flow_acl_frmt_entry - Format acl entry
2320 * @hw: pointer to the hardware structure
2321 * @prof: pointer to flow profile
2322 * @e: pointer to the flow entry
2323 * @data: pointer to a data buffer containing flow entry's match values/masks
2324 * @acts: array of actions to be performed on a match
2325 * @acts_cnt: number of actions
2327 * Formats the key (and key_inverse) to be matched from the data passed in,
2328 * along with data from the flow profile. This key/key_inverse pair makes up
2329 * the 'entry' for an acl flow entry.
2331 static enum ice_status
2332 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2333 struct ice_flow_entry *e, u8 *data,
2334 struct ice_flow_action *acts, u8 acts_cnt)
2336 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2337 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2338 enum ice_status status;
2343 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2347 /* Format the result action */
2349 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2353 status = ICE_ERR_NO_MEMORY;
2355 e->acts = (struct ice_flow_action *)
2356 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2357 ICE_NONDMA_TO_NONDMA);
2362 e->acts_cnt = acts_cnt;
2364 /* Format the matching data */
2365 buf_sz = prof->cfg.scen->width;
2366 buf = (u8 *)ice_malloc(hw, buf_sz);
2370 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2374 /* 'key' buffer will store both key and key_inverse, so must be twice
2377 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2381 range_buf = (struct ice_aqc_acl_profile_ranges *)
2382 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2386 /* Set don't care mask to all 1's to start, will zero out used bytes */
2387 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2389 for (i = 0; i < prof->segs_cnt; i++) {
2390 struct ice_flow_seg_info *seg = &prof->segs[i];
2391 u64 match = seg->match;
2394 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2395 struct ice_flow_fld_info *info;
2396 const u64 bit = BIT_ULL(j);
2401 info = &seg->fields[j];
2403 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2404 ice_flow_acl_frmt_entry_range(j, info,
2408 ice_flow_acl_frmt_entry_fld(j, info, buf,
2414 for (j = 0; j < seg->raws_cnt; j++) {
2415 struct ice_flow_fld_info *info = &seg->raws[j].info;
2416 u16 dst, src, mask, k;
2417 bool use_mask = false;
2419 src = info->src.val;
2420 dst = info->entry.val -
2421 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2422 mask = info->src.mask;
2424 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2427 for (k = 0; k < info->entry.last; k++, dst++) {
2428 buf[dst] = data[src++];
2430 dontcare[dst] = ~data[mask++];
2437 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2438 dontcare[prof->cfg.scen->pid_idx] = 0;
2440 /* Format the buffer for direction flags */
2441 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2443 if (prof->dir == ICE_FLOW_RX)
2444 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2447 buf[prof->cfg.scen->rng_chk_idx] = range;
2448 /* Mark any unused range checkers as don't care */
2449 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2450 e->range_buf = range_buf;
2452 ice_free(hw, range_buf);
2455 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2461 e->entry_sz = buf_sz * 2;
2468 ice_free(hw, dontcare);
2473 if (status && range_buf) {
2474 ice_free(hw, range_buf);
2475 e->range_buf = NULL;
2478 if (status && e->acts) {
2479 ice_free(hw, e->acts);
2484 if (status && cnt_alloc)
2485 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2491 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2492 * the compared data.
2493 * @prof: pointer to flow profile
2494 * @e: pointer to the comparing flow entry
2495 * @do_chg_action: decide if we want to change the ACL action
2496 * @do_add_entry: decide if we want to add the new ACL entry
2497 * @do_rem_entry: decide if we want to remove the current ACL entry
2499 * Find an ACL scenario entry that matches the compared data. In the same time,
2500 * this function also figure out:
2501 * a/ If we want to change the ACL action
2502 * b/ If we want to add the new ACL entry
2503 * c/ If we want to remove the current ACL entry
2505 static struct ice_flow_entry *
2506 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2507 struct ice_flow_entry *e, bool *do_chg_action,
2508 bool *do_add_entry, bool *do_rem_entry)
2510 struct ice_flow_entry *p, *return_entry = NULL;
2514 * a/ There exists an entry with same matching data, but different
2515 * priority, then we remove this existing ACL entry. Then, we
2516 * will add the new entry to the ACL scenario.
2517 * b/ There exists an entry with same matching data, priority, and
2518 * result action, then we do nothing
2519 * c/ There exists an entry with same matching data, priority, but
2520 * different, action, then do only change the action's entry.
2521 * d/ Else, we add this new entry to the ACL scenario.
2523 *do_chg_action = false;
2524 *do_add_entry = true;
2525 *do_rem_entry = false;
2526 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2527 if (memcmp(p->entry, e->entry, p->entry_sz))
2530 /* From this point, we have the same matching_data. */
2531 *do_add_entry = false;
2534 if (p->priority != e->priority) {
2535 /* matching data && !priority */
2536 *do_add_entry = true;
2537 *do_rem_entry = true;
2541 /* From this point, we will have matching_data && priority */
2542 if (p->acts_cnt != e->acts_cnt)
2543 *do_chg_action = true;
2544 for (i = 0; i < p->acts_cnt; i++) {
2545 bool found_not_match = false;
2547 for (j = 0; j < e->acts_cnt; j++)
2548 if (memcmp(&p->acts[i], &e->acts[j],
2549 sizeof(struct ice_flow_action))) {
2550 found_not_match = true;
2554 if (found_not_match) {
2555 *do_chg_action = true;
2560 /* (do_chg_action = true) means :
2561 * matching_data && priority && !result_action
2562 * (do_chg_action = false) means :
2563 * matching_data && priority && result_action
2568 return return_entry;
2572 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2575 static enum ice_acl_entry_prior
2576 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2578 enum ice_acl_entry_prior acl_prior;
2581 case ICE_FLOW_PRIO_LOW:
2582 acl_prior = ICE_LOW;
2584 case ICE_FLOW_PRIO_NORMAL:
2585 acl_prior = ICE_NORMAL;
2587 case ICE_FLOW_PRIO_HIGH:
2588 acl_prior = ICE_HIGH;
2591 acl_prior = ICE_NORMAL;
2599 * ice_flow_acl_union_rng_chk - Perform union operation between two
2600 * range-range checker buffers
2601 * @dst_buf: pointer to destination range checker buffer
2602 * @src_buf: pointer to source range checker buffer
2604 * For this function, we do the union between dst_buf and src_buf
2605 * range checker buffer, and we will save the result back to dst_buf
2607 static enum ice_status
2608 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2609 struct ice_aqc_acl_profile_ranges *src_buf)
2613 if (!dst_buf || !src_buf)
2614 return ICE_ERR_BAD_PTR;
2616 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2617 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2618 bool will_populate = false;
2620 in_data = &src_buf->checker_cfg[i];
2625 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2626 cfg_data = &dst_buf->checker_cfg[j];
2628 if (!cfg_data->mask ||
2629 !memcmp(cfg_data, in_data,
2630 sizeof(struct ice_acl_rng_data))) {
2631 will_populate = true;
2636 if (will_populate) {
2637 ice_memcpy(cfg_data, in_data,
2638 sizeof(struct ice_acl_rng_data),
2639 ICE_NONDMA_TO_NONDMA);
2641 /* No available slot left to program range checker */
2642 return ICE_ERR_MAX_LIMIT;
2650 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2651 * @hw: pointer to the hardware structure
2652 * @prof: pointer to flow profile
2653 * @entry: double pointer to the flow entry
2655 * For this function, we will look at the current added entries in the
2656 * corresponding ACL scenario. Then, we will perform matching logic to
2657 * see if we want to add/modify/do nothing with this new entry.
2659 static enum ice_status
2660 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2661 struct ice_flow_entry **entry)
2663 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2664 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2665 struct ice_acl_act_entry *acts = NULL;
2666 struct ice_flow_entry *exist;
2667 enum ice_status status = ICE_SUCCESS;
2668 struct ice_flow_entry *e;
2671 if (!entry || !(*entry) || !prof)
2672 return ICE_ERR_BAD_PTR;
2676 do_chg_rng_chk = false;
2680 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2685 /* Query the current range-checker value in FW */
2686 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2690 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2691 sizeof(struct ice_aqc_acl_profile_ranges),
2692 ICE_NONDMA_TO_NONDMA);
2694 /* Generate the new range-checker value */
2695 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2699 /* Reconfigure the range check if the buffer is changed. */
2700 do_chg_rng_chk = false;
2701 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2702 sizeof(struct ice_aqc_acl_profile_ranges))) {
2703 status = ice_prog_acl_prof_ranges(hw, prof_id,
2704 &cfg_rng_buf, NULL);
2708 do_chg_rng_chk = true;
2712 /* Figure out if we want to (change the ACL action) and/or
2713 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2715 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2716 &do_add_entry, &do_rem_entry);
2719 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2724 /* Prepare the result action buffer */
2725 acts = (struct ice_acl_act_entry *)ice_calloc
2726 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2727 for (i = 0; i < e->acts_cnt; i++)
2728 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2729 sizeof(struct ice_acl_act_entry),
2730 ICE_NONDMA_TO_NONDMA);
2733 enum ice_acl_entry_prior prior;
2737 keys = (u8 *)e->entry;
2738 inverts = keys + (e->entry_sz / 2);
2739 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2741 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2742 inverts, acts, e->acts_cnt,
2747 e->scen_entry_idx = entry_idx;
2748 LIST_ADD(&e->l_entry, &prof->entries);
2750 if (do_chg_action) {
2751 /* For the action memory info, update the SW's copy of
2752 * exist entry with e's action memory info
2754 ice_free(hw, exist->acts);
2755 exist->acts_cnt = e->acts_cnt;
2756 exist->acts = (struct ice_flow_action *)
2757 ice_calloc(hw, exist->acts_cnt,
2758 sizeof(struct ice_flow_action));
2761 status = ICE_ERR_NO_MEMORY;
2765 ice_memcpy(exist->acts, e->acts,
2766 sizeof(struct ice_flow_action) * e->acts_cnt,
2767 ICE_NONDMA_TO_NONDMA);
2769 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2771 exist->scen_entry_idx);
2776 if (do_chg_rng_chk) {
2777 /* In this case, we want to update the range checker
2778 * information of the exist entry
2780 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2786 /* As we don't add the new entry to our SW DB, deallocate its
2787 * memories, and return the exist entry to the caller
2789 ice_dealloc_flow_entry(hw, e);
2800 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2801 * @hw: pointer to the hardware structure
2802 * @prof: pointer to flow profile
2803 * @e: double pointer to the flow entry
2805 static enum ice_status
2806 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2807 struct ice_flow_entry **e)
2809 enum ice_status status;
2811 ice_acquire_lock(&prof->entries_lock);
2812 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2813 ice_release_lock(&prof->entries_lock);
2819 * ice_flow_add_entry - Add a flow entry
2820 * @hw: pointer to the HW struct
2821 * @blk: classification stage
2822 * @prof_id: ID of the profile to add a new flow entry to
2823 * @entry_id: unique ID to identify this flow entry
2824 * @vsi_handle: software VSI handle for the flow entry
2825 * @prio: priority of the flow entry
2826 * @data: pointer to a data buffer containing flow entry's match values/masks
2827 * @acts: arrays of actions to be performed on a match
2828 * @acts_cnt: number of actions
2829 * @entry_h: pointer to buffer that receives the new flow entry's handle
2832 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2833 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2834 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2837 struct ice_flow_entry *e = NULL;
2838 struct ice_flow_prof *prof;
2839 enum ice_status status = ICE_SUCCESS;
2841 /* ACL entries must indicate an action */
2842 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2843 return ICE_ERR_PARAM;
2845 /* No flow entry data is expected for RSS */
2846 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2847 return ICE_ERR_BAD_PTR;
2849 if (!ice_is_vsi_valid(hw, vsi_handle))
2850 return ICE_ERR_PARAM;
2852 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2854 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2856 status = ICE_ERR_DOES_NOT_EXIST;
2858 /* Allocate memory for the entry being added and associate
2859 * the VSI to the found flow profile
2861 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2863 status = ICE_ERR_NO_MEMORY;
2865 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2868 ice_release_lock(&hw->fl_profs_locks[blk]);
2873 e->vsi_handle = vsi_handle;
2882 /* ACL will handle the entry management */
2883 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2888 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2896 status = ICE_ERR_NOT_IMPL;
2900 if (blk != ICE_BLK_ACL) {
2901 /* ACL will handle the entry management */
2902 ice_acquire_lock(&prof->entries_lock);
2903 LIST_ADD(&e->l_entry, &prof->entries);
2904 ice_release_lock(&prof->entries_lock);
2907 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2912 ice_free(hw, e->entry);
2920 * ice_flow_rem_entry - Remove a flow entry
2921 * @hw: pointer to the HW struct
2922 * @blk: classification stage
2923 * @entry_h: handle to the flow entry to be removed
2925 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2928 struct ice_flow_entry *entry;
2929 struct ice_flow_prof *prof;
2930 enum ice_status status = ICE_SUCCESS;
2932 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2933 return ICE_ERR_PARAM;
2935 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2937 /* Retain the pointer to the flow profile as the entry will be freed */
2941 ice_acquire_lock(&prof->entries_lock);
2942 status = ice_flow_rem_entry_sync(hw, blk, entry);
2943 ice_release_lock(&prof->entries_lock);
2950 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2951 * @seg: packet segment the field being set belongs to
2952 * @fld: field to be set
2953 * @field_type: type of the field
2954 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2955 * entry's input buffer
2956 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2958 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2959 * entry's input buffer
2961 * This helper function stores information of a field being matched, including
2962 * the type of the field and the locations of the value to match, the mask, and
2963 * and the upper-bound value in the start of the input buffer for a flow entry.
2964 * This function should only be used for fixed-size data structures.
2966 * This function also opportunistically determines the protocol headers to be
2967 * present based on the fields being set. Some fields cannot be used alone to
2968 * determine the protocol headers present. Sometimes, fields for particular
2969 * protocol headers are not matched. In those cases, the protocol headers
2970 * must be explicitly set.
2973 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2974 enum ice_flow_fld_match_type field_type, u16 val_loc,
2975 u16 mask_loc, u16 last_loc)
2977 u64 bit = BIT_ULL(fld);
2980 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2983 seg->fields[fld].type = field_type;
2984 seg->fields[fld].src.val = val_loc;
2985 seg->fields[fld].src.mask = mask_loc;
2986 seg->fields[fld].src.last = last_loc;
2988 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2992 * ice_flow_set_fld - specifies locations of field from entry's input buffer
2993 * @seg: packet segment the field being set belongs to
2994 * @fld: field to be set
2995 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2996 * entry's input buffer
2997 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2999 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3000 * entry's input buffer
3001 * @range: indicate if field being matched is to be in a range
3003 * This function specifies the locations, in the form of byte offsets from the
3004 * start of the input buffer for a flow entry, from where the value to match,
3005 * the mask value, and upper value can be extracted. These locations are then
3006 * stored in the flow profile. When adding a flow entry associated with the
3007 * flow profile, these locations will be used to quickly extract the values and
3008 * create the content of a match entry. This function should only be used for
3009 * fixed-size data structures.
3012 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3013 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3015 enum ice_flow_fld_match_type t = range ?
3016 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3018 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3022 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3023 * @seg: packet segment the field being set belongs to
3024 * @fld: field to be set
3025 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3026 * entry's input buffer
3027 * @pref_loc: location of prefix value from entry's input buffer
3028 * @pref_sz: size of the location holding the prefix value
3030 * This function specifies the locations, in the form of byte offsets from the
3031 * start of the input buffer for a flow entry, from where the value to match
3032 * and the IPv4 prefix value can be extracted. These locations are then stored
3033 * in the flow profile. When adding flow entries to the associated flow profile,
3034 * these locations can be used to quickly extract the values to create the
3035 * content of a match entry. This function should only be used for fixed-size
3039 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3040 u16 val_loc, u16 pref_loc, u8 pref_sz)
3042 /* For this type of field, the "mask" location is for the prefix value's
3043 * location and the "last" location is for the size of the location of
3046 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3047 pref_loc, (u16)pref_sz);
3051 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3052 * @seg: packet segment the field being set belongs to
3053 * @off: offset of the raw field from the beginning of the segment in bytes
3054 * @len: length of the raw pattern to be matched
3055 * @val_loc: location of the value to match from entry's input buffer
3056 * @mask_loc: location of mask value from entry's input buffer
3058 * This function specifies the offset of the raw field to be match from the
3059 * beginning of the specified packet segment, and the locations, in the form of
3060 * byte offsets from the start of the input buffer for a flow entry, from where
3061 * the value to match and the mask value to be extracted. These locations are
3062 * then stored in the flow profile. When adding flow entries to the associated
3063 * flow profile, these locations can be used to quickly extract the values to
3064 * create the content of a match entry. This function should only be used for
3065 * fixed-size data structures.
3068 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3069 u16 val_loc, u16 mask_loc)
3071 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3072 seg->raws[seg->raws_cnt].off = off;
3073 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3074 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3075 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3076 /* The "last" field is used to store the length of the field */
3077 seg->raws[seg->raws_cnt].info.src.last = len;
3080 /* Overflows of "raws" will be handled as an error condition later in
3081 * the flow when this information is processed.
3086 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3087 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3089 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3090 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3092 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3093 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3094 ICE_FLOW_SEG_HDR_SCTP)
3096 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3097 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3098 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3099 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3102 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3103 * @segs: pointer to the flow field segment(s)
3104 * @hash_fields: fields to be hashed on for the segment(s)
3105 * @flow_hdr: protocol header fields within a packet segment
3107 * Helper function to extract fields from hash bitmap and use flow
3108 * header value to set flow field segment for further use in flow
3109 * profile entry or removal.
3111 static enum ice_status
3112 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3115 u64 val = hash_fields;
3118 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3119 u64 bit = BIT_ULL(i);
3122 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3123 ICE_FLOW_FLD_OFF_INVAL,
3124 ICE_FLOW_FLD_OFF_INVAL,
3125 ICE_FLOW_FLD_OFF_INVAL, false);
3129 ICE_FLOW_SET_HDRS(segs, flow_hdr);
3131 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3132 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3133 return ICE_ERR_PARAM;
3135 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3136 if (val && !ice_is_pow2(val))
3139 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3140 if (val && !ice_is_pow2(val))
3147 * ice_rem_vsi_rss_list - remove VSI from RSS list
3148 * @hw: pointer to the hardware structure
3149 * @vsi_handle: software VSI handle
3151 * Remove the VSI from all RSS configurations in the list.
3153 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3155 struct ice_rss_cfg *r, *tmp;
3157 if (LIST_EMPTY(&hw->rss_list_head))
3160 ice_acquire_lock(&hw->rss_locks);
3161 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3162 ice_rss_cfg, l_entry) {
3163 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3164 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3165 LIST_DEL(&r->l_entry);
3169 ice_release_lock(&hw->rss_locks);
3173 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3174 * @hw: pointer to the hardware structure
3175 * @vsi_handle: software VSI handle
3177 * This function will iterate through all flow profiles and disassociate
3178 * the VSI from that profile. If the flow profile has no VSIs it will
3181 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3183 const enum ice_block blk = ICE_BLK_RSS;
3184 struct ice_flow_prof *p, *t;
3185 enum ice_status status = ICE_SUCCESS;
3187 if (!ice_is_vsi_valid(hw, vsi_handle))
3188 return ICE_ERR_PARAM;
3190 if (LIST_EMPTY(&hw->fl_profs[blk]))
3193 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3194 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3196 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3197 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3201 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3202 status = ice_flow_rem_prof_sync(hw, blk, p);
3208 ice_release_lock(&hw->fl_profs_locks[blk]);
3214 * ice_rem_rss_list - remove RSS configuration from list
3215 * @hw: pointer to the hardware structure
3216 * @vsi_handle: software VSI handle
3217 * @prof: pointer to flow profile
3219 * Assumption: lock has already been acquired for RSS list
3222 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3224 struct ice_rss_cfg *r, *tmp;
3226 /* Search for RSS hash fields associated to the VSI that match the
3227 * hash configurations associated to the flow profile. If found
3228 * remove from the RSS entry list of the VSI context and delete entry.
3230 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3231 ice_rss_cfg, l_entry) {
/* Match is on BOTH the hashed fields and the packet headers of
 * the profile's last (innermost) segment.
 */
3232 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3233 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3234 ice_clear_bit(vsi_handle, r->vsis);
/* Unlink the entry once no VSI references it anymore */
3235 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3236 LIST_DEL(&r->l_entry);
3245 * ice_add_rss_list - add RSS configuration to list
3246 * @hw: pointer to the hardware structure
3247 * @vsi_handle: software VSI handle
3248 * @prof: pointer to flow profile
3250 * Assumption: lock has already been acquired for RSS list
3252 static enum ice_status
3253 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3255 struct ice_rss_cfg *r, *rss_cfg;
/* If an entry already exists for this hash/header combination, just
 * mark this VSI in its bitmap instead of allocating a duplicate.
 */
3257 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3258 ice_rss_cfg, l_entry)
3259 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3260 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3261 ice_set_bit(vsi_handle, r->vsis);
/* No matching entry - allocate a new one from the profile's innermost
 * segment and append it to the RSS list.
 */
3265 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3267 return ICE_ERR_NO_MEMORY;
3269 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3270 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3271 rss_cfg->symm = prof->cfg.symm;
3272 ice_set_bit(vsi_handle, rss_cfg->vsis);
3274 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit layout of the 64-bit generated RSS flow profile ID (see format
 * comment below): hash fields in [0:31], header bits in [32:62],
 * encapsulation flag in bit 63.
 */
3279 #define ICE_FLOW_PROF_HASH_S 0
3280 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3281 #define ICE_FLOW_PROF_HDR_S 32
3282 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3283 #define ICE_FLOW_PROF_ENCAP_S 63
3284 #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
/* Segment counts used when programming RSS for outer (non-tunneled)
 * vs. inner (tunneled) packet headers.
 */
3286 #define ICE_RSS_OUTER_HEADERS 1
3287 #define ICE_RSS_INNER_HEADERS 2
3289 /* Flow profile ID format:
3290 * [0:31] - Packet match fields
3291 * [32:62] - Protocol header
3292 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3294 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3295 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3296 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3297 ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
/* Program one byte lane of a GLQF_HSYMM register so that field-vector
 * word @src is symmetrically XOR'd with word @dst for profile @prof_id.
 */
3300 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3302 u32 s = ((src % 4) << 3); /* byte shift */
3303 u32 v = dst | 0x80; /* value to program */
3304 u8 i = src / 4; /* register index */
/* Read-modify-write: replace only this byte lane of the register */
3307 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3308 reg = (reg & ~(0xff << s)) | (v << s);
3309 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Configure a symmetric XOR between @len consecutive field-vector words
 * starting at @src and @dst for the given profile. Each pair is
 * programmed in both directions so src^dst hashing is order-independent.
 */
3313 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3316 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3319 for (i = 0; i < len; i++) {
3320 ice_rss_config_xor_word(hw, prof_id,
3321 /* Yes, field vector in GLQF_HSYMM and
3322 * GLQF_HINSET is inversed!
3324 fv_last_word - (src + i),
3325 fv_last_word - (dst + i));
3326 ice_rss_config_xor_word(hw, prof_id,
3327 fv_last_word - (dst + i),
3328 fv_last_word - (src + i));
/* Update the GLQF_HSYMM registers for @prof: clear any previous symmetric
 * configuration, then - when symmetric hashing is enabled on the profile -
 * program src/dst XOR pairs for the IPv4/IPv6 addresses and the
 * TCP/UDP/SCTP port fields extracted in the profile's last segment.
 */
3333 ice_rss_update_symm(struct ice_hw *hw,
3334 struct ice_flow_prof *prof)
3336 struct ice_prof_map *map;
3339 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3340 prof_id = map->prof_id;
3342 /* clear to default */
3343 for (m = 0; m < 6; m++)
3344 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3345 if (prof->cfg.symm) {
/* Symmetry is derived from the innermost segment's extractions */
3346 struct ice_flow_seg_info *seg =
3347 &prof->segs[prof->segs_cnt - 1];
3349 struct ice_flow_seg_xtrct *ipv4_src =
3350 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3351 struct ice_flow_seg_xtrct *ipv4_dst =
3352 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3353 struct ice_flow_seg_xtrct *ipv6_src =
3354 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3355 struct ice_flow_seg_xtrct *ipv6_dst =
3356 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3358 struct ice_flow_seg_xtrct *tcp_src =
3359 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3360 struct ice_flow_seg_xtrct *tcp_dst =
3361 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3363 struct ice_flow_seg_xtrct *udp_src =
3364 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3365 struct ice_flow_seg_xtrct *udp_dst =
3366 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3368 struct ice_flow_seg_xtrct *sctp_src =
3369 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3370 struct ice_flow_seg_xtrct *sctp_dst =
3371 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* A prot_id of 0 means the field was not extracted for this
 * profile; only XOR field pairs that are actually present.
 * Lengths are in field-vector words: 2 for an IPv4 address,
 * 8 for an IPv6 address, 1 for an L4 port.
 */
3374 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3375 ice_rss_config_xor(hw, prof_id,
3376 ipv4_src->idx, ipv4_dst->idx, 2);
3379 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3380 ice_rss_config_xor(hw, prof_id,
3381 ipv6_src->idx, ipv6_dst->idx, 8);
3384 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3385 ice_rss_config_xor(hw, prof_id,
3386 tcp_src->idx, tcp_dst->idx, 1);
3389 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3390 ice_rss_config_xor(hw, prof_id,
3391 udp_src->idx, udp_dst->idx, 1);
3394 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3395 ice_rss_config_xor(hw, prof_id,
3396 sctp_src->idx, sctp_dst->idx, 1);
3401 * ice_add_rss_cfg_sync - add an RSS configuration
3402 * @hw: pointer to the hardware structure
3403 * @vsi_handle: software VSI handle
3404 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3405 * @addl_hdrs: protocol header fields
3406 * @segs_cnt: packet segment count
3407 * @symm: symmetric hash enable/disable
3409 * Assumption: lock has already been acquired for RSS list
3411 static enum ice_status
3412 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3413 u32 addl_hdrs, u8 segs_cnt, bool symm)
3415 const enum ice_block blk = ICE_BLK_RSS;
3416 struct ice_flow_prof *prof = NULL;
3417 struct ice_flow_seg_info *segs;
3418 enum ice_status status;
3420 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3421 return ICE_ERR_PARAM;
3423 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3426 return ICE_ERR_NO_MEMORY;
3428 /* Construct the packet segment info from the hashed fields */
3429 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3434 /* Search for a flow profile that has matching headers, hash fields
3435 * and has the input VSI associated to it. If found, no further
3436 * operations required and exit.
3438 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3440 ICE_FLOW_FIND_PROF_CHK_FLDS |
3441 ICE_FLOW_FIND_PROF_CHK_VSI);
/* Same profile already in place: only the symmetric setting may
 * need updating.
 */
3443 if (prof->cfg.symm == symm)
3445 prof->cfg.symm = symm;
3449 /* Check if a flow profile exists with the same protocol headers and
3450 * associated with the input VSI. If so disasscociate the VSI from
3451 * this profile. The VSI will be added to a new profile created with
3452 * the protocol header and new hash field configuration.
3454 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3455 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3457 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3459 ice_rem_rss_list(hw, vsi_handle, prof);
3463 /* Remove profile if it has no VSIs associated */
3464 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3465 status = ice_flow_rem_prof(hw, blk, prof->id);
3471 /* Search for a profile that has same match fields only. If this
3472 * exists then associate the VSI to this profile.
3474 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3476 ICE_FLOW_FIND_PROF_CHK_FLDS);
3478 if (prof->cfg.symm == symm) {
3479 status = ice_flow_assoc_prof(hw, blk, prof,
3482 status = ice_add_rss_list(hw, vsi_handle,
3485 /* if a profile exist but with different symmetric
3486 * requirement, just return error.
3488 status = ICE_ERR_NOT_SUPPORTED;
3493 /* Create a new flow profile with generated profile and packet
3494 * segment information.
3496 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3497 ICE_FLOW_GEN_PROFID(hashed_flds,
3498 segs[segs_cnt - 1].hdrs,
3500 segs, segs_cnt, NULL, 0, &prof);
3504 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3505 /* If association to a new flow profile failed then this profile can
3509 ice_flow_rem_prof(hw, blk, prof->id);
3513 status = ice_add_rss_list(hw, vsi_handle, prof);
/* Record the symmetric setting and push it to GLQF_HSYMM */
3515 prof->cfg.symm = symm;
3518 ice_rss_update_symm(hw, prof);
3526 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3527 * @hw: pointer to the hardware structure
3528 * @vsi_handle: software VSI handle
3529 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3530 * @addl_hdrs: protocol header fields
3531 * @symm: symmetric hash enable/disable
3533 * This function will generate a flow profile based on fields associated with
3534 * the input fields to hash on, the flow type and use the VSI number to add
3535 * a flow entry to the profile.
3538 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3539 u32 addl_hdrs, bool symm)
3541 enum ice_status status;
3543 if (hashed_flds == ICE_HASH_INVALID ||
3544 !ice_is_vsi_valid(hw, vsi_handle))
3545 return ICE_ERR_PARAM;
/* Locked wrapper: program outer headers first, then inner (tunneled)
 * headers, under the RSS list lock.
 */
3547 ice_acquire_lock(&hw->rss_locks);
3548 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3549 ICE_RSS_OUTER_HEADERS, symm);
3551 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3552 addl_hdrs, ICE_RSS_INNER_HEADERS,
3554 ice_release_lock(&hw->rss_locks);
3560 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3561 * @hw: pointer to the hardware structure
3562 * @vsi_handle: software VSI handle
3563 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3564 * @addl_hdrs: Protocol header fields within a packet segment
3565 * @segs_cnt: packet segment count
3567 * Assumption: lock has already been acquired for RSS list
3569 static enum ice_status
3570 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3571 u32 addl_hdrs, u8 segs_cnt)
3573 const enum ice_block blk = ICE_BLK_RSS;
3574 struct ice_flow_seg_info *segs;
3575 struct ice_flow_prof *prof;
3576 enum ice_status status;
3578 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3581 return ICE_ERR_NO_MEMORY;
3583 /* Construct the packet segment info from the hashed fields */
3584 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
/* Look up a profile matching both headers and hash fields; without a
 * match there is nothing to remove.
 */
3589 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3591 ICE_FLOW_FIND_PROF_CHK_FLDS);
3593 status = ICE_ERR_DOES_NOT_EXIST;
3597 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3601 /* Remove RSS configuration from VSI context before deleting
3604 ice_rem_rss_list(hw, vsi_handle, prof);
/* Delete the profile itself once no VSI references it */
3606 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3607 status = ice_flow_rem_prof(hw, blk, prof->id);
3615 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3616 * @hw: pointer to the hardware structure
3617 * @vsi_handle: software VSI handle
3618 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3619 * @addl_hdrs: Protocol header fields within a packet segment
3621 * This function will lookup the flow profile based on the input
3622 * hash field bitmap, iterate through the profile entry list of
3623 * that profile and find entry associated with input VSI to be
3624 * removed. Calls are made to underlying flow apis which will in
3625 * turn build or update buffers for RSS XLT1 section.
3628 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3631 enum ice_status status;
3633 if (hashed_flds == ICE_HASH_INVALID ||
3634 !ice_is_vsi_valid(hw, vsi_handle))
3635 return ICE_ERR_PARAM;
/* Locked wrapper: remove the outer-header configuration first, then
 * the inner (tunneled) one.
 */
3637 ice_acquire_lock(&hw->rss_locks);
3638 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3639 ICE_RSS_OUTER_HEADERS);
3641 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3642 addl_hdrs, ICE_RSS_INNER_HEADERS);
3643 ice_release_lock(&hw->rss_locks);
3649 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3650 * @hw: pointer to the hardware structure
3651 * @vsi_handle: software VSI handle
3653 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3655 enum ice_status status = ICE_SUCCESS;
3656 struct ice_rss_cfg *r;
3658 if (!ice_is_vsi_valid(hw, vsi_handle))
3659 return ICE_ERR_PARAM;
/* Re-apply every stored RSS configuration whose VSI bitmap contains
 * this VSI, for the outer and then the inner packet headers.
 */
3661 ice_acquire_lock(&hw->rss_locks);
3662 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3663 ice_rss_cfg, l_entry) {
3664 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3665 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3668 ICE_RSS_OUTER_HEADERS,
3672 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3675 ICE_RSS_INNER_HEADERS,
3681 ice_release_lock(&hw->rss_locks);
3687 * ice_get_rss_cfg - returns hashed fields for the given header types
3688 * @hw: pointer to the hardware structure
3689 * @vsi_handle: software VSI handle
3690 * @hdrs: protocol header type
3692 * This function will return the match fields of the first instance of flow
3693 * profile having the given header types and containing input VSI
3695 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3697 struct ice_rss_cfg *r, *rss_cfg = NULL;
3699 /* verify if the protocol header is non zero and VSI is valid */
3700 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3701 return ICE_HASH_INVALID;
/* First entry matching both this VSI and the requested headers wins */
3703 ice_acquire_lock(&hw->rss_locks);
3704 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3705 ice_rss_cfg, l_entry)
3706 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3707 r->packet_hdr == hdrs) {
3711 ice_release_lock(&hw->rss_locks);
/* ICE_HASH_INVALID signals "no configuration found" to the caller */
3713 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;