/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */
5 #include "ice_common.h"
/* Size of known protocol header fields, in bytes. These feed the
 * ICE_FLOW_FLD_INFO() initializers below, which convert them to bits.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
31 /* Describe properties of a protocol header field */
32 struct ice_flow_field_info {
33 enum ice_flow_seg_hdr hdr;
34 s16 off; /* Offset from start of a protocol header, in bits */
35 u16 size; /* Size of fields in bits */
36 u16 mask; /* 16-bit mask for field */
/* Initializer for an unmasked field: byte offset/size are converted to bits;
 * .mask is left zero (designated initializer), meaning "match all bits".
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
/* Initializer for a field matched under a 16-bit mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
53 /* Table containing properties of supported protocol header fields */
55 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
57 /* ICE_FLOW_FIELD_IDX_ETH_DA */
58 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
59 /* ICE_FLOW_FIELD_IDX_ETH_SA */
60 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
61 /* ICE_FLOW_FIELD_IDX_S_VLAN */
62 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
63 /* ICE_FLOW_FIELD_IDX_C_VLAN */
64 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
65 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
66 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
68 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
69 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
71 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
72 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
74 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
75 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
76 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
77 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
78 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
80 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
81 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
82 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
83 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
84 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
86 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
87 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
88 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
89 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
90 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
91 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
92 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
93 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
97 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
99 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
101 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
103 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
105 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
107 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
110 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
112 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
113 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
114 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
115 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
116 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
118 /* ICE_FLOW_FIELD_IDX_ARP_OP */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
121 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
122 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
123 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
124 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
126 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
129 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
130 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
131 ICE_FLOW_FLD_SZ_GTP_TEID),
132 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
134 ICE_FLOW_FLD_SZ_GTP_TEID),
135 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
137 ICE_FLOW_FLD_SZ_GTP_TEID),
138 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
139 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
140 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
141 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
143 ICE_FLOW_FLD_SZ_GTP_TEID),
144 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
146 ICE_FLOW_FLD_SZ_GTP_TEID),
148 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
149 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
150 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
152 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
154 ICE_FLOW_FLD_SZ_PFCP_SEID),
156 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
158 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
160 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
162 ICE_FLOW_FLD_SZ_ESP_SPI),
164 /* ICE_FLOW_FIELD_IDX_AH_SPI */
165 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
166 ICE_FLOW_FLD_SZ_AH_SPI),
168 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
169 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
170 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
177 static const u32 ice_ptypes_mac_ofos[] = {
178 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
179 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
180 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
181 0x00000000, 0x00000000, 0x00000000, 0x00000000,
182 0x00000000, 0x00000000, 0x00000000, 0x00000000,
183 0x00000000, 0x00000000, 0x00000000, 0x00000000,
184 0x00000000, 0x00000000, 0x00000000, 0x00000000,
185 0x00000000, 0x00000000, 0x00000000, 0x00000000,
188 /* Packet types for packets with an Innermost/Last MAC VLAN header */
189 static const u32 ice_ptypes_macvlan_il[] = {
190 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
191 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
192 0x00000000, 0x00000000, 0x00000000, 0x00000000,
193 0x00000000, 0x00000000, 0x00000000, 0x00000000,
194 0x00000000, 0x00000000, 0x00000000, 0x00000000,
195 0x00000000, 0x00000000, 0x00000000, 0x00000000,
196 0x00000000, 0x00000000, 0x00000000, 0x00000000,
197 0x00000000, 0x00000000, 0x00000000, 0x00000000,
200 /* Packet types for packets with an Outer/First/Single IPv4 header */
201 static const u32 ice_ptypes_ipv4_ofos[] = {
202 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
203 0x00000000, 0x00000155, 0x00000000, 0x00000000,
204 0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 0x00000000, 0x00000000, 0x00000000, 0x00000000,
208 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 0x00000000, 0x00000000, 0x00000000, 0x00000000,
212 /* Packet types for packets with an Innermost/Last IPv4 header */
213 static const u32 ice_ptypes_ipv4_il[] = {
214 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
215 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
220 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 0x00000000, 0x00000000, 0x00000000, 0x00000000,
224 /* Packet types for packets with an Outer/First/Single IPv6 header */
225 static const u32 ice_ptypes_ipv6_ofos[] = {
226 0x00000000, 0x00000000, 0x77000000, 0x10002000,
227 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
228 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 /* Packet types for packets with an Innermost/Last IPv6 header */
237 static const u32 ice_ptypes_ipv6_il[] = {
238 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
239 0x00000770, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 /* Packet types for packets with an Outermost/First ARP header */
249 static const u32 ice_ptypes_arp_of[] = {
250 0x00000800, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
263 static const u32 ice_ptypes_udp_il[] = {
264 0x81000000, 0x20204040, 0x04000010, 0x80810102,
265 0x00000040, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00410000, 0x90842000, 0x00000007,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 /* Packet types for packets with an Innermost/Last TCP header */
275 static const u32 ice_ptypes_tcp_il[] = {
276 0x04000000, 0x80810102, 0x10000040, 0x02040408,
277 0x00000102, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00820000, 0x21084000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 /* Packet types for packets with an Innermost/Last SCTP header */
287 static const u32 ice_ptypes_sctp_il[] = {
288 0x08000000, 0x01020204, 0x20000081, 0x04080810,
289 0x00000204, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x01040000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 /* Packet types for packets with an Outermost/First ICMP header */
299 static const u32 ice_ptypes_icmp_of[] = {
300 0x10000000, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 /* Packet types for packets with an Innermost/Last ICMP header */
311 static const u32 ice_ptypes_icmp_il[] = {
312 0x00000000, 0x02040408, 0x40000102, 0x08101020,
313 0x00000408, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x42108000, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 /* Packet types for packets with an Outermost/First GRE header */
323 static const u32 ice_ptypes_gre_of[] = {
324 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
325 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 /* Packet types for packets with an Innermost/Last MAC header */
335 static const u32 ice_ptypes_mac_il[] = {
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 /* Packet types for GTPC */
347 static const u32 ice_ptypes_gtpc[] = {
348 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000180, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 /* Packet types for GTPC with TEID */
359 static const u32 ice_ptypes_gtpc_tid[] = {
360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000060, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 0x00000000, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 /* Packet types for GTPU */
371 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
372 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
373 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
374 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
375 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
376 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
377 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
378 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
379 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
380 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
381 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
382 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
383 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
384 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
385 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
386 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
387 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
388 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
389 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
390 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
391 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
394 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
395 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
396 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
397 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
398 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
399 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
400 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
401 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
402 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
403 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
404 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
405 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
406 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
407 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
408 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
409 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
410 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
411 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
412 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
413 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
414 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
417 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
418 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
419 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
420 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
421 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
422 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
423 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
424 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
425 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
426 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
427 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
428 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
429 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
430 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
431 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
432 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
433 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
434 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
435 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
436 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
437 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
440 static const u32 ice_ptypes_gtpu[] = {
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 /* Packet types for pppoe */
452 static const u32 ice_ptypes_pppoe[] = {
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 0x00000000, 0x00000000, 0x00000000, 0x00000000,
459 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 /* Packet types for packets with PFCP NODE header */
464 static const u32 ice_ptypes_pfcp_node[] = {
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x80000000, 0x00000002,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 0x00000000, 0x00000000, 0x00000000, 0x00000000,
471 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 /* Packet types for packets with PFCP SESSION header */
476 static const u32 ice_ptypes_pfcp_session[] = {
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000005,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 /* Packet types for l2tpv3 */
488 static const u32 ice_ptypes_l2tpv3[] = {
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000300,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 /* Packet types for esp */
500 static const u32 ice_ptypes_esp[] = {
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000003, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
506 0x00000000, 0x00000000, 0x00000000, 0x00000000,
507 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 /* Packet types for ah */
512 static const u32 ice_ptypes_ah[] = {
513 0x00000000, 0x00000000, 0x00000000, 0x00000000,
514 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
515 0x00000000, 0x00000000, 0x00000000, 0x00000000,
516 0x00000000, 0x00000000, 0x00000000, 0x00000000,
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
518 0x00000000, 0x00000000, 0x00000000, 0x00000000,
519 0x00000000, 0x00000000, 0x00000000, 0x00000000,
520 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 /* Packet types for packets with NAT_T ESP header */
524 static const u32 ice_ptypes_nat_t_esp[] = {
525 0x00000000, 0x00000000, 0x00000000, 0x00000000,
526 0x00000000, 0x00000030, 0x00000000, 0x00000000,
527 0x00000000, 0x00000000, 0x00000000, 0x00000000,
528 0x00000000, 0x00000000, 0x00000000, 0x00000000,
529 0x00000000, 0x00000000, 0x00000000, 0x00000000,
530 0x00000000, 0x00000000, 0x00000000, 0x00000000,
531 0x00000000, 0x00000000, 0x00000000, 0x00000000,
532 0x00000000, 0x00000000, 0x00000000, 0x00000000,
535 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
536 0x00000846, 0x00000000, 0x00000000, 0x00000000,
537 0x00000000, 0x00000000, 0x00000000, 0x00000000,
538 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
539 0x00000000, 0x00000000, 0x00000000, 0x00000000,
540 0x00000000, 0x00000000, 0x00000000, 0x00000000,
541 0x00000000, 0x00000000, 0x00000000, 0x00000000,
542 0x00000000, 0x00000000, 0x00000000, 0x00000000,
543 0x00000000, 0x00000000, 0x00000000, 0x00000000,
546 /* Manage parameters and info. used during the creation of a flow profile */
547 struct ice_flow_prof_params {
549 u16 entry_length; /* # of bytes formatted entry will require */
551 struct ice_flow_prof *prof;
553 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
554 * This will give us the direction flags.
556 struct ice_fv_word es[ICE_MAX_FV_WORDS];
557 /* attributes can be used to add attributes to a particular PTYPE */
558 const struct ice_ptype_attributes *attr;
561 u16 mask[ICE_MAX_FV_WORDS];
562 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Header groups treated as inner-segment headers for RSS.
 * NOTE(review): inferred from the macro name only — confirm against callers.
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* Per-layer header groups; ice_flow_val_hdrs() uses the L3/L4 masks to
 * reject segments that request more than one header within a layer.
 */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	ICE_FLOW_SEG_HDR_SCTP)
/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
586 static enum ice_status
587 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
591 for (i = 0; i < segs_cnt; i++) {
592 /* Multiple L3 headers */
593 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
594 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
595 return ICE_ERR_PARAM;
597 /* Multiple L4 headers */
598 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
599 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
600 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes.
 * Consumed by ice_flow_calc_seg_sz() below.
 */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 */
622 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
627 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
628 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
631 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
632 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
633 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
634 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
635 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
636 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
637 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
638 /* A L3 header is required if L4 is specified */
642 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
643 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
644 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
645 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
646 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
647 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
648 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
649 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 */
661 static enum ice_status
662 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
664 struct ice_flow_prof *prof;
667 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
672 for (i = 0; i < params->prof->segs_cnt; i++) {
673 const ice_bitmap_t *src;
676 hdrs = prof->segs[i].hdrs;
678 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
679 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
680 (const ice_bitmap_t *)ice_ptypes_mac_il;
681 ice_and_bitmap(params->ptypes, params->ptypes, src,
685 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
686 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
687 ice_and_bitmap(params->ptypes, params->ptypes, src,
691 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
692 ice_and_bitmap(params->ptypes, params->ptypes,
693 (const ice_bitmap_t *)ice_ptypes_arp_of,
697 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
698 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
699 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
700 ice_and_bitmap(params->ptypes, params->ptypes, src,
702 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
703 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
704 ice_and_bitmap(params->ptypes,
707 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
708 ice_and_bitmap(params->ptypes, params->ptypes,
709 (const ice_bitmap_t *)
712 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
713 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
714 ice_and_bitmap(params->ptypes, params->ptypes,
715 src, ICE_FLOW_PTYPE_MAX);
717 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
718 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
719 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
720 ice_and_bitmap(params->ptypes, params->ptypes, src,
722 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
723 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
724 ice_and_bitmap(params->ptypes,
727 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
728 ice_and_bitmap(params->ptypes, params->ptypes,
729 (const ice_bitmap_t *)
732 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
733 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
734 ice_and_bitmap(params->ptypes, params->ptypes,
735 src, ICE_FLOW_PTYPE_MAX);
739 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
740 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
741 ice_and_bitmap(params->ptypes, params->ptypes,
742 src, ICE_FLOW_PTYPE_MAX);
743 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
744 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
745 ice_and_bitmap(params->ptypes, params->ptypes, src,
749 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
750 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
751 (const ice_bitmap_t *)ice_ptypes_icmp_il;
752 ice_and_bitmap(params->ptypes, params->ptypes, src,
754 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
756 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
757 ice_and_bitmap(params->ptypes, params->ptypes,
758 src, ICE_FLOW_PTYPE_MAX);
760 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
761 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
762 ice_and_bitmap(params->ptypes, params->ptypes,
763 src, ICE_FLOW_PTYPE_MAX);
764 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
765 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
766 ice_and_bitmap(params->ptypes, params->ptypes,
767 src, ICE_FLOW_PTYPE_MAX);
768 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
769 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
770 ice_and_bitmap(params->ptypes, params->ptypes,
771 src, ICE_FLOW_PTYPE_MAX);
773 /* Attributes for GTP packet with downlink */
774 params->attr = ice_attr_gtpu_down;
775 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
776 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
777 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
778 ice_and_bitmap(params->ptypes, params->ptypes,
779 src, ICE_FLOW_PTYPE_MAX);
781 /* Attributes for GTP packet with uplink */
782 params->attr = ice_attr_gtpu_up;
783 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
784 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
785 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
786 ice_and_bitmap(params->ptypes, params->ptypes,
787 src, ICE_FLOW_PTYPE_MAX);
789 /* Attributes for GTP packet with Extension Header */
790 params->attr = ice_attr_gtpu_eh;
791 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
792 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
793 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
794 ice_and_bitmap(params->ptypes, params->ptypes,
795 src, ICE_FLOW_PTYPE_MAX);
796 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
797 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
798 ice_and_bitmap(params->ptypes, params->ptypes,
799 src, ICE_FLOW_PTYPE_MAX);
800 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
801 src = (const ice_bitmap_t *)ice_ptypes_esp;
802 ice_and_bitmap(params->ptypes, params->ptypes,
803 src, ICE_FLOW_PTYPE_MAX);
804 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
805 src = (const ice_bitmap_t *)ice_ptypes_ah;
806 ice_and_bitmap(params->ptypes, params->ptypes,
807 src, ICE_FLOW_PTYPE_MAX);
808 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
809 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
810 ice_and_bitmap(params->ptypes, params->ptypes,
811 src, ICE_FLOW_PTYPE_MAX);
814 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
815 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
817 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
820 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
822 ice_and_bitmap(params->ptypes, params->ptypes,
823 src, ICE_FLOW_PTYPE_MAX);
825 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
826 ice_andnot_bitmap(params->ptypes, params->ptypes,
827 src, ICE_FLOW_PTYPE_MAX);
829 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
830 ice_andnot_bitmap(params->ptypes, params->ptypes,
831 src, ICE_FLOW_PTYPE_MAX);
839 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
840 * @hw: pointer to the HW struct
841 * @params: information about the flow to be processed
842 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
844 * This function will allocate an extraction sequence entries for a DWORD size
845 * chunk of the packet flags.
847 static enum ice_status
848 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
849 struct ice_flow_prof_params *params,
850 enum ice_flex_mdid_pkt_flags flags)
852 u8 fv_words = hw->blk[params->blk].es.fvw;
855 /* Make sure the number of extraction sequence entries required does not
856 * exceed the block's capacity.
858 if (params->es_cnt >= fv_words)
859 return ICE_ERR_MAX_LIMIT;
861 /* some blocks require a reversed field vector layout */
862 if (hw->blk[params->blk].es.reverse)
863 idx = fv_words - params->es_cnt - 1;
865 idx = params->es_cnt;
867 params->es[idx].prot_id = ICE_PROT_META_ID;
868 params->es[idx].off = flags;
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bitfield of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fills the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld, u64 match)
{
	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 sib_mask = 0;
	/* NOTE(review): 'adj' participates in the displacement computation
	 * below; no case in this view modifies it, so it is presumed to stay
	 * zero — confirm against the full upstream source.
	 */
	u16 adj = 0;
	u16 mask;
	u16 off;

	flds = params->prof->segs[seg].fields;

	/* Map the flow field to a protocol ID and, where HW shares one
	 * extraction entry between two fields, record the sibling field.
	 */
	switch (fld) {
	case ICE_FLOW_FIELD_IDX_ETH_DA:
	case ICE_FLOW_FIELD_IDX_ETH_SA:
	case ICE_FLOW_FIELD_IDX_S_VLAN:
	case ICE_FLOW_FIELD_IDX_C_VLAN:
		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
		break;
	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GTPC_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
		/* GTP is accessed through UDP OF protocol */
		prot_id = ICE_PROT_UDP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
		prot_id = ICE_PROT_PPPOE;
		break;
	case ICE_FLOW_FIELD_IDX_PFCP_SEID:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
		prot_id = ICE_PROT_L2TPV3;
		break;
	case ICE_FLOW_FIELD_IDX_ESP_SPI:
		prot_id = ICE_PROT_ESP_F;
		break;
	case ICE_FLOW_FIELD_IDX_AH_SPI:
		prot_id = ICE_PROT_ESP_2;
		break;
	case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_ARP_SIP:
	case ICE_FLOW_FIELD_IDX_ARP_DIP:
	case ICE_FLOW_FIELD_IDX_ARP_SHA:
	case ICE_FLOW_FIELD_IDX_ARP_DHA:
	case ICE_FLOW_FIELD_IDX_ARP_OP:
		prot_id = ICE_PROT_ARP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
		/* ICMP type and code share the same extraction seq. entry */
		prot_id = (params->prof->segs[seg].hdrs &
			   ICE_FLOW_SEG_HDR_IPV4) ?
			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
			ICE_FLOW_FIELD_IDX_ICMP_CODE :
			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	flds[fld].xtrct.prot_id = prot_id;
	/* Word-aligned byte offset of the field within the header */
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	/* Bit displacement of the field within the extracted word */
	flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;
	flds[fld].xtrct.mask = ice_flds_info[fld].mask;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
				  ice_flds_info[fld].size, ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	mask = flds[fld].xtrct.mask;
	for (i = 0; i < cnt; i++) {
		/* Only consume an extraction sequence entry if there is no
		 * sibling field associated with this field or the sibling entry
		 * already extracts the word shared with this field.
		 */
		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
		    flds[sib].xtrct.off != off) {
			u8 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= fv_words)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = prot_id;
			params->es[idx].off = off;
			/* The sibling's mask is OR-ed in so the shared word
			 * keeps both fields visible to HW.
			 */
			params->mask[idx] = mask | sib_mask;
			params->es_cnt++;
		}

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return ICE_SUCCESS;
}
/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	/* Nothing to do when the segment defines no raw matches */
	if (!params->prof->segs[seg].raws_cnt)
		return ICE_SUCCESS;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		/* Word-aligned byte offset of the raw match */
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		/* Bit displacement of the raw match within the first word */
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
					  (raw->info.src.last * BITS_PER_BYTE),
					  (ICE_FLOW_FV_EXTRACT_SZ *
					   BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return ICE_SUCCESS;
}
1160 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1161 * @hw: pointer to the HW struct
1162 * @params: information about the flow to be processed
1164 * This function iterates through all matched fields in the given segments, and
1165 * creates an extraction sequence for the fields.
1167 static enum ice_status
1168 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1169 struct ice_flow_prof_params *params)
1171 enum ice_status status = ICE_SUCCESS;
1174 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1177 if (params->blk == ICE_BLK_ACL) {
1178 status = ice_flow_xtract_pkt_flags(hw, params,
1179 ICE_RX_MDID_PKT_FLAGS_15_0);
1184 for (i = 0; i < params->prof->segs_cnt; i++) {
1185 u64 match = params->prof->segs[i].match;
1186 enum ice_flow_field j;
1188 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1189 const u64 bit = BIT_ULL(j);
1192 status = ice_flow_xtract_fld(hw, params, i, j,
1200 /* Process raw matching bytes */
1201 status = ice_flow_xtract_raws(hw, params, i);
1210 * ice_flow_sel_acl_scen - returns the specific scenario
1211 * @hw: pointer to the hardware structure
1212 * @params: information about the flow to be processed
1214 * This function will return the specific scenario based on the
1215 * params passed to it
1217 static enum ice_status
1218 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1220 /* Find the best-fit scenario for the provided match width */
1221 struct ice_acl_scen *cand_scen = NULL, *scen;
1224 return ICE_ERR_DOES_NOT_EXIST;
1226 /* Loop through each scenario and match against the scenario width
1227 * to select the specific scenario
1229 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1230 if (scen->eff_width >= params->entry_length &&
1231 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1234 return ICE_ERR_DOES_NOT_EXIST;
1236 params->prof->cfg.scen = cand_scen;
/**
 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
 * @params: information about the flow to be processed
 *
 * Lays out each matched field (and raw match) of every segment into the ACL
 * byte-selection space, assigning range-checker slots to range-type fields,
 * and records the resulting entry length for scenario selection.
 */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* Byte-selection indices start after the reserved prefix */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u64 match = seg->match;
		u8 j;

		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *fld;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			fld = &seg->fields[j];
			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							(fld->xtrct.disp %
							 BITS_PER_BYTE),
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Range fields consume a range-checker slot
				 * instead of byte-selection space.
				 */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}

			match &= ~bit;
		}

		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1330 * ice_flow_proc_segs - process all packet segments associated with a profile
1331 * @hw: pointer to the HW struct
1332 * @params: information about the flow to be processed
1334 static enum ice_status
1335 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1337 enum ice_status status;
1339 status = ice_flow_proc_seg_hdrs(params);
1343 status = ice_flow_create_xtrct_seq(hw, params);
1347 switch (params->blk) {
1350 status = ICE_SUCCESS;
1353 status = ice_flow_acl_def_entry_frmt(params);
1356 status = ice_flow_sel_acl_scen(hw, params);
1362 return ICE_ERR_NOT_IMPL;
1368 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1369 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1370 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 *
 * Returns the first profile in the block whose segments match, or NULL.
 * Holds the block's profile-list lock for the duration of the scan.
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
			 u8 segs_cnt, u16 vsi_handle, u32 conds)
{
	struct ice_flow_prof *p, *prof = NULL;

	ice_acquire_lock(&hw->fl_profs_locks[blk]);
	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
		    segs_cnt && segs_cnt == p->segs_cnt) {
			u8 i;

			/* Check for profile-VSI association if specified */
			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
			    ice_is_vsi_valid(hw, vsi_handle) &&
			    !ice_is_bit_set(p->vsis, vsi_handle))
				continue;

			/* Protocol headers must be checked. Matched fields are
			 * checked if specified.
			 */
			for (i = 0; i < segs_cnt; i++)
				if (segs[i].hdrs != p->segs[i].hdrs ||
				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
				     segs[i].match != p->segs[i].match))
					break;

			/* A match is found if all segments are matched */
			if (i == segs_cnt) {
				prof = p;
				break;
			}
		}
	ice_release_lock(&hw->fl_profs_locks[blk]);

	return prof;
}
1422 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1423 * @hw: pointer to the HW struct
1424 * @blk: classification stage
1425 * @dir: flow direction
1426 * @segs: array of one or more packet segments that describe the flow
1427 * @segs_cnt: number of packet segments provided
1430 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1431 struct ice_flow_seg_info *segs, u8 segs_cnt)
1433 struct ice_flow_prof *p;
1435 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1436 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1438 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1442 * ice_flow_find_prof_id - Look up a profile with given profile ID
1443 * @hw: pointer to the HW struct
1444 * @blk: classification stage
1445 * @prof_id: unique ID to identify this flow profile
1447 static struct ice_flow_prof *
1448 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1450 struct ice_flow_prof *p;
1452 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1453 if (p->id == prof_id)
1460 * ice_dealloc_flow_entry - Deallocate flow entry memory
1461 * @hw: pointer to the HW struct
1462 * @entry: flow entry to be removed
1465 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1471 ice_free(hw, entry->entry);
1473 if (entry->range_buf) {
1474 ice_free(hw, entry->range_buf);
1475 entry->range_buf = NULL;
1479 ice_free(hw, entry->acts);
1481 entry->acts_cnt = 0;
1484 ice_free(hw, entry);
1487 #define ICE_ACL_INVALID_SCEN 0x3f
1490 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1491 * @hw: pointer to the hardware structure
1492 * @prof: pointer to flow profile
1493 * @buf: destination buffer function writes partial extraction sequence to
1495 * returns ICE_SUCCESS if no PF is associated to the given profile
1496 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1497 * returns other error code for real error
1499 static enum ice_status
1500 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1501 struct ice_aqc_acl_prof_generic_frmt *buf)
1503 enum ice_status status;
1506 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1510 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1514 /* If all PF's associated scenarios are all 0 or all
1515 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1516 * not been configured yet.
1518 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1519 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1520 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1521 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1524 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1525 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1526 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1527 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1528 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1529 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1530 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1531 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1534 return ICE_ERR_IN_USE;
1538 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1539 * @hw: pointer to the hardware structure
1540 * @acts: array of actions to be performed on a match
1541 * @acts_cnt: number of actions
1543 static enum ice_status
1544 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1549 for (i = 0; i < acts_cnt; i++) {
1550 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1551 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1552 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1553 struct ice_acl_cntrs cntrs;
1554 enum ice_status status;
1556 cntrs.bank = 0; /* Only bank0 for the moment */
1558 LE16_TO_CPU(acts[i].data.acl_act.value);
1560 LE16_TO_CPU(acts[i].data.acl_act.value);
1562 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1563 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1565 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1567 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1576 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1577 * @hw: pointer to the hardware structure
1578 * @prof: pointer to flow profile
1580 * Disassociate the scenario from the profile for the PF of the VSI.
1582 static enum ice_status
1583 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1585 struct ice_aqc_acl_prof_generic_frmt buf;
1586 enum ice_status status = ICE_SUCCESS;
1589 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1591 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1595 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1599 /* Clear scenario for this PF */
1600 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1601 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1607 * ice_flow_rem_entry_sync - Remove a flow entry
1608 * @hw: pointer to the HW struct
1609 * @blk: classification stage
1610 * @entry: flow entry to be removed
1612 static enum ice_status
1613 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1614 struct ice_flow_entry *entry)
1617 return ICE_ERR_BAD_PTR;
1619 if (blk == ICE_BLK_ACL) {
1620 enum ice_status status;
1623 return ICE_ERR_BAD_PTR;
1625 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1626 entry->scen_entry_idx);
1630 /* Checks if we need to release an ACL counter. */
1631 if (entry->acts_cnt && entry->acts)
1632 ice_flow_acl_free_act_cntr(hw, entry->acts,
1636 LIST_DEL(&entry->l_entry);
1638 ice_dealloc_flow_entry(hw, entry);
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
	params.prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params.prof));
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance
	 */
	if (acts_cnt) {
		params.prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params.prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
			      params.attr, params.attr_cnt, params.es,
			      params.mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	ice_init_lock(&params.prof->entries_lock);
	*prof = params.prof;

out:
	/* On failure, release the partially built profile and its actions */
	if (status) {
		if (params.prof->acts)
			ice_free(hw, params.prof->acts);
		ice_free(hw, params.prof);
	}

	return status;
}
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Disassociate the scenario from the profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
1817 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1818 * @buf: Destination buffer function writes partial xtrct sequence to
1819 * @info: Info about field
1822 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1823 struct ice_flow_fld_info *info)
1828 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1829 info->xtrct.disp / BITS_PER_BYTE;
1830 dst = info->entry.val;
1831 for (i = 0; i < info->entry.last; i++)
1832 /* HW stores field vector words in LE, convert words back to BE
1833 * so constructed entries will end up in network order
1835 buf->byte_selection[dst++] = src++ ^ 1;
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Writes the profile-wide extraction configuration (byte/word selections)
 * once per profile, then records this PF's scenario number.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	/* ICE_SUCCESS here means no PF uses the profile yet, so the shared
	 * configuration below still needs to be written.
	 */
	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u64 match = seg->match;
			u16 j;

			for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
				const u64 bit = BIT_ULL(j);

				if (!(match & bit))
					continue;

				info = &seg->fields[j];

				/* Range fields use a word selection slot;
				 * others use the byte-selection sequence.
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
						info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);

				match &= ~bit;
			}

			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Mark every PF's scenario as invalid until a PF claims it */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);

	return status;
}
1910 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1911 * @hw: pointer to the hardware structure
1912 * @blk: classification stage
1913 * @vsi_handle: software VSI handle
1914 * @vsig: target VSI group
1916 * Assumption: the caller has already verified that the VSI to
1917 * be added has the same characteristics as the VSIG and will
1918 * thereby have access to all resources added to that VSIG.
1921 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1924 enum ice_status status;
1926 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1927 return ICE_ERR_PARAM;
1929 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1930 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1932 ice_release_lock(&hw->fl_profs_locks[blk]);
1938 * ice_flow_assoc_prof - associate a VSI with a flow profile
1939 * @hw: pointer to the hardware structure
1940 * @blk: classification stage
1941 * @prof: pointer to flow profile
1942 * @vsi_handle: software VSI handle
1944 * Assumption: the caller has acquired the lock to the profile list
1945 * and the software VSI handle has been validated
1947 static enum ice_status
1948 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1949 struct ice_flow_prof *prof, u16 vsi_handle)
1951 enum ice_status status = ICE_SUCCESS;
1953 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1954 if (blk == ICE_BLK_ACL) {
1955 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1959 status = ice_add_prof_id_flow(hw, blk,
1960 ice_get_hw_vsi_num(hw,
1964 ice_set_bit(vsi_handle, prof->vsis);
1966 ice_debug(hw, ICE_DBG_FLOW,
1967 "HW profile add failed, %d\n",
1975 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1976 * @hw: pointer to the hardware structure
1977 * @blk: classification stage
1978 * @prof: pointer to flow profile
1979 * @vsi_handle: software VSI handle
1981 * Assumption: the caller has acquired the lock to the profile list
1982 * and the software VSI handle has been validated
1984 static enum ice_status
1985 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1986 struct ice_flow_prof *prof, u16 vsi_handle)
1988 enum ice_status status = ICE_SUCCESS;
1990 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1991 status = ice_rem_prof_id_flow(hw, blk,
1992 ice_get_hw_vsi_num(hw,
1996 ice_clear_bit(vsi_handle, prof->vsis);
1998 ice_debug(hw, ICE_DBG_FLOW,
1999 "HW profile remove failed, %d\n",
2007 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2008 * @hw: pointer to the HW struct
2009 * @blk: classification stage
2010 * @dir: flow direction
2011 * @prof_id: unique ID to identify this flow profile
2012 * @segs: array of one or more packet segments that describe the flow
2013 * @segs_cnt: number of packet segments provided
2014 * @acts: array of default actions
2015 * @acts_cnt: number of default actions
2016 * @prof: stores the returned flow profile added
2019 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2020 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2021 struct ice_flow_action *acts, u8 acts_cnt,
2022 struct ice_flow_prof **prof)
2024 enum ice_status status;
2026 if (segs_cnt > ICE_FLOW_SEG_MAX)
2027 return ICE_ERR_MAX_LIMIT;
2030 return ICE_ERR_PARAM;
2033 return ICE_ERR_BAD_PTR;
2035 status = ice_flow_val_hdrs(segs, segs_cnt);
2039 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2041 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2042 acts, acts_cnt, prof);
2044 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2046 ice_release_lock(&hw->fl_profs_locks[blk]);
2052 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2053 * @hw: pointer to the HW struct
2054 * @blk: the block for which the flow profile is to be removed
2055 * @prof_id: unique ID of the flow profile to be removed
2058 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2060 struct ice_flow_prof *prof;
2061 enum ice_status status;
2063 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2065 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2067 status = ICE_ERR_DOES_NOT_EXIST;
2071 /* prof becomes invalid after the call */
2072 status = ice_flow_rem_prof_sync(hw, blk, prof);
2075 ice_release_lock(&hw->fl_profs_locks[blk]);
2081 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2082 * @hw: pointer to the HW struct
2083 * @blk: classification stage
2084 * @prof_id: the profile ID handle
2085 * @hw_prof_id: pointer to variable to receive the HW profile ID
2088 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2091 struct ice_prof_map *map;
2093 map = ice_search_prof_id(hw, blk, prof_id);
2095 *hw_prof_id = map->prof_id;
2099 return ICE_ERR_DOES_NOT_EXIST;
2103 * ice_flow_find_entry - look for a flow entry using its unique ID
2104 * @hw: pointer to the HW struct
2105 * @blk: classification stage
2106 * @entry_id: unique ID to identify this flow entry
2108 * This function looks for the flow entry with the specified unique ID in all
2109 * flow profiles of the specified classification stage. If the entry is found,
2110 * and it returns the handle to the flow entry. Otherwise, it returns
2111 * ICE_FLOW_ENTRY_ID_INVAL.
2113 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2115 struct ice_flow_entry *found = NULL;
2116 struct ice_flow_prof *p;
2118 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2120 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2121 struct ice_flow_entry *e;
2123 ice_acquire_lock(&p->entries_lock);
2124 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2125 if (e->id == entry_id) {
2129 ice_release_lock(&p->entries_lock);
2135 ice_release_lock(&hw->fl_profs_locks[blk]);
2137 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2141 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2142 * @hw: pointer to the hardware structure
2143 * @acts: array of actions to be performed on a match
2144 * @acts_cnt: number of actions
2145 * @cnt_alloc: indicates if an ACL counter has been allocated.
2147 static enum ice_status
2148 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2149 u8 acts_cnt, bool *cnt_alloc)
2151 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2154 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2157 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2158 return ICE_ERR_OUT_OF_RANGE;
2160 for (i = 0; i < acts_cnt; i++) {
2161 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2162 acts[i].type != ICE_FLOW_ACT_DROP &&
2163 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2164 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2167 /* If the caller want to add two actions of the same type, then
2168 * it is considered invalid configuration.
2170 if (ice_test_and_set_bit(acts[i].type, dup_check))
2171 return ICE_ERR_PARAM;
2174 /* Checks if ACL counters are needed. */
2175 for (i = 0; i < acts_cnt; i++) {
2176 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2177 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2178 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2179 struct ice_acl_cntrs cntrs;
2180 enum ice_status status;
2183 cntrs.bank = 0; /* Only bank0 for the moment */
2185 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2186 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2188 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2190 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2193 /* Counter index within the bank */
2194 acts[i].data.acl_act.value =
2195 CPU_TO_LE16(cntrs.first_cntr);
2204 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2205 * @fld: number of the given field
2206 * @info: info about field
2207 * @range_buf: range checker configuration buffer
2208 * @data: pointer to a data buffer containing flow entry's match values/masks
2209 * @range: Input/output param indicating which range checkers are being used
2212 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2213 struct ice_aqc_acl_profile_ranges *range_buf,
2214 u8 *data, u8 *range)
2218 /* If not specified, default mask is all bits in field */
2219 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2220 BIT(ice_flds_info[fld].size) - 1 :
2221 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2223 /* If the mask is 0, then we don't need to worry about this input
2224 * range checker value.
2228 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2230 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2231 u8 range_idx = info->entry.val;
2233 range_buf->checker_cfg[range_idx].low_boundary =
2234 CPU_TO_BE16(new_low);
2235 range_buf->checker_cfg[range_idx].high_boundary =
2236 CPU_TO_BE16(new_high);
2237 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2239 /* Indicate which range checker is being used */
2240 *range |= BIT(range_idx);
2245 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2246 * @fld: number of the given field
2247 * @info: info about the field
2248 * @buf: buffer containing the entry
2249 * @dontcare: buffer containing don't care mask for entry
2250 * @data: pointer to a data buffer containing flow entry's match values/masks
2253 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2254 u8 *dontcare, u8 *data)
2256 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2257 bool use_mask = false;
2260 src = info->src.val;
2261 mask = info->src.mask;
2262 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2263 disp = info->xtrct.disp % BITS_PER_BYTE;
2265 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2268 for (k = 0; k < info->entry.last; k++, dst++) {
2269 /* Add overflow bits from previous byte */
2270 buf[dst] = (tmp_s & 0xff00) >> 8;
2272 /* If mask is not valid, tmp_m is always zero, so just setting
2273 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2274 * overflow bits of mask from prev byte
2276 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2278 /* If there is displacement, last byte will only contain
2279 * displaced data, but there is no more data to read from user
2280 * buffer, so skip so as not to potentially read beyond end of
2283 if (!disp || k < info->entry.last - 1) {
2284 /* Store shifted data to use in next byte */
2285 tmp_s = data[src++] << disp;
2287 /* Add current (shifted) byte */
2288 buf[dst] |= tmp_s & 0xff;
2290 /* Handle mask if valid */
2292 tmp_m = (~data[mask++] & 0xff) << disp;
2293 dontcare[dst] |= tmp_m & 0xff;
2298 /* Fill in don't care bits at beginning of field */
2300 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2301 for (k = 0; k < disp; k++)
2302 dontcare[dst] |= BIT(k);
2305 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2307 /* Fill in don't care bits at end of field */
2309 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2310 info->entry.last - 1;
2311 for (k = end_disp; k < BITS_PER_BYTE; k++)
2312 dontcare[dst] |= BIT(k);
2317 * ice_flow_acl_frmt_entry - Format ACL entry
2318 * @hw: pointer to the hardware structure
2319 * @prof: pointer to flow profile
2320 * @e: pointer to the flow entry
2321 * @data: pointer to a data buffer containing flow entry's match values/masks
2322 * @acts: array of actions to be performed on a match
2323 * @acts_cnt: number of actions
2325 * Formats the key (and key_inverse) to be matched from the data passed in,
2326 * along with data from the flow profile. This key/key_inverse pair makes up
2327 * the 'entry' for an ACL flow entry.
2329 static enum ice_status
2330 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2331 struct ice_flow_entry *e, u8 *data,
2332 struct ice_flow_action *acts, u8 acts_cnt)
2334 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2335 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2336 enum ice_status status;
2341 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2345 /* Format the result action */
2347 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2351 status = ICE_ERR_NO_MEMORY;
2353 e->acts = (struct ice_flow_action *)
2354 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2355 ICE_NONDMA_TO_NONDMA);
2360 e->acts_cnt = acts_cnt;
2362 /* Format the matching data */
2363 buf_sz = prof->cfg.scen->width;
2364 buf = (u8 *)ice_malloc(hw, buf_sz);
2368 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2372 /* 'key' buffer will store both key and key_inverse, so must be twice
2375 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2379 range_buf = (struct ice_aqc_acl_profile_ranges *)
2380 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2384 /* Set don't care mask to all 1's to start, will zero out used bytes */
2385 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2387 for (i = 0; i < prof->segs_cnt; i++) {
2388 struct ice_flow_seg_info *seg = &prof->segs[i];
2389 u64 match = seg->match;
2392 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2393 struct ice_flow_fld_info *info;
2394 const u64 bit = BIT_ULL(j);
2399 info = &seg->fields[j];
2401 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2402 ice_flow_acl_frmt_entry_range(j, info,
2406 ice_flow_acl_frmt_entry_fld(j, info, buf,
2412 for (j = 0; j < seg->raws_cnt; j++) {
2413 struct ice_flow_fld_info *info = &seg->raws[j].info;
2414 u16 dst, src, mask, k;
2415 bool use_mask = false;
2417 src = info->src.val;
2418 dst = info->entry.val -
2419 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2420 mask = info->src.mask;
2422 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2425 for (k = 0; k < info->entry.last; k++, dst++) {
2426 buf[dst] = data[src++];
2428 dontcare[dst] = ~data[mask++];
2435 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2436 dontcare[prof->cfg.scen->pid_idx] = 0;
2438 /* Format the buffer for direction flags */
2439 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2441 if (prof->dir == ICE_FLOW_RX)
2442 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2445 buf[prof->cfg.scen->rng_chk_idx] = range;
2446 /* Mark any unused range checkers as don't care */
2447 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2448 e->range_buf = range_buf;
2450 ice_free(hw, range_buf);
2453 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2459 e->entry_sz = buf_sz * 2;
2466 ice_free(hw, dontcare);
2471 if (status && range_buf) {
2472 ice_free(hw, range_buf);
2473 e->range_buf = NULL;
2476 if (status && e->acts) {
2477 ice_free(hw, e->acts);
2482 if (status && cnt_alloc)
2483 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2489 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2490 * the compared data.
2491 * @prof: pointer to flow profile
2492 * @e: pointer to the comparing flow entry
2493 * @do_chg_action: decide if we want to change the ACL action
2494 * @do_add_entry: decide if we want to add the new ACL entry
2495 * @do_rem_entry: decide if we want to remove the current ACL entry
2497 * Find an ACL scenario entry that matches the compared data. In the same time,
2498 * this function also figure out:
2499 * a/ If we want to change the ACL action
2500 * b/ If we want to add the new ACL entry
2501 * c/ If we want to remove the current ACL entry
2503 static struct ice_flow_entry *
2504 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2505 struct ice_flow_entry *e, bool *do_chg_action,
2506 bool *do_add_entry, bool *do_rem_entry)
2508 struct ice_flow_entry *p, *return_entry = NULL;
2512 * a/ There exists an entry with same matching data, but different
2513 * priority, then we remove this existing ACL entry. Then, we
2514 * will add the new entry to the ACL scenario.
2515 * b/ There exists an entry with same matching data, priority, and
2516 * result action, then we do nothing
2517 * c/ There exists an entry with same matching data, priority, but
2518 * different, action, then do only change the action's entry.
2519 * d/ Else, we add this new entry to the ACL scenario.
2521 *do_chg_action = false;
2522 *do_add_entry = true;
2523 *do_rem_entry = false;
2524 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2525 if (memcmp(p->entry, e->entry, p->entry_sz))
2528 /* From this point, we have the same matching_data. */
2529 *do_add_entry = false;
2532 if (p->priority != e->priority) {
2533 /* matching data && !priority */
2534 *do_add_entry = true;
2535 *do_rem_entry = true;
2539 /* From this point, we will have matching_data && priority */
2540 if (p->acts_cnt != e->acts_cnt)
2541 *do_chg_action = true;
2542 for (i = 0; i < p->acts_cnt; i++) {
2543 bool found_not_match = false;
2545 for (j = 0; j < e->acts_cnt; j++)
2546 if (memcmp(&p->acts[i], &e->acts[j],
2547 sizeof(struct ice_flow_action))) {
2548 found_not_match = true;
2552 if (found_not_match) {
2553 *do_chg_action = true;
2558 /* (do_chg_action = true) means :
2559 * matching_data && priority && !result_action
2560 * (do_chg_action = false) means :
2561 * matching_data && priority && result_action
2566 return return_entry;
2570 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2573 static enum ice_acl_entry_prior
2574 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2576 enum ice_acl_entry_prior acl_prior;
2579 case ICE_FLOW_PRIO_LOW:
2580 acl_prior = ICE_LOW;
2582 case ICE_FLOW_PRIO_NORMAL:
2583 acl_prior = ICE_NORMAL;
2585 case ICE_FLOW_PRIO_HIGH:
2586 acl_prior = ICE_HIGH;
2589 acl_prior = ICE_NORMAL;
2597 * ice_flow_acl_union_rng_chk - Perform union operation between two
2598 * range-range checker buffers
2599 * @dst_buf: pointer to destination range checker buffer
2600 * @src_buf: pointer to source range checker buffer
2602 * For this function, we do the union between dst_buf and src_buf
2603 * range checker buffer, and we will save the result back to dst_buf
2605 static enum ice_status
2606 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2607 struct ice_aqc_acl_profile_ranges *src_buf)
2611 if (!dst_buf || !src_buf)
2612 return ICE_ERR_BAD_PTR;
2614 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2615 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2616 bool will_populate = false;
2618 in_data = &src_buf->checker_cfg[i];
2623 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2624 cfg_data = &dst_buf->checker_cfg[j];
2626 if (!cfg_data->mask ||
2627 !memcmp(cfg_data, in_data,
2628 sizeof(struct ice_acl_rng_data))) {
2629 will_populate = true;
2634 if (will_populate) {
2635 ice_memcpy(cfg_data, in_data,
2636 sizeof(struct ice_acl_rng_data),
2637 ICE_NONDMA_TO_NONDMA);
2639 /* No available slot left to program range checker */
2640 return ICE_ERR_MAX_LIMIT;
2648 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2649 * @hw: pointer to the hardware structure
2650 * @prof: pointer to flow profile
2651 * @entry: double pointer to the flow entry
2653 * For this function, we will look at the current added entries in the
2654 * corresponding ACL scenario. Then, we will perform matching logic to
2655 * see if we want to add/modify/do nothing with this new entry.
2657 static enum ice_status
2658 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2659 struct ice_flow_entry **entry)
2661 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2662 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2663 struct ice_acl_act_entry *acts = NULL;
2664 struct ice_flow_entry *exist;
2665 enum ice_status status = ICE_SUCCESS;
2666 struct ice_flow_entry *e;
2669 if (!entry || !(*entry) || !prof)
2670 return ICE_ERR_BAD_PTR;
2674 do_chg_rng_chk = false;
2678 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2683 /* Query the current range-checker value in FW */
2684 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2688 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2689 sizeof(struct ice_aqc_acl_profile_ranges),
2690 ICE_NONDMA_TO_NONDMA);
2692 /* Generate the new range-checker value */
2693 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2697 /* Reconfigure the range check if the buffer is changed. */
2698 do_chg_rng_chk = false;
2699 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2700 sizeof(struct ice_aqc_acl_profile_ranges))) {
2701 status = ice_prog_acl_prof_ranges(hw, prof_id,
2702 &cfg_rng_buf, NULL);
2706 do_chg_rng_chk = true;
2710 /* Figure out if we want to (change the ACL action) and/or
2711 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2713 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2714 &do_add_entry, &do_rem_entry);
2717 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2722 /* Prepare the result action buffer */
2723 acts = (struct ice_acl_act_entry *)ice_calloc
2724 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2725 for (i = 0; i < e->acts_cnt; i++)
2726 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2727 sizeof(struct ice_acl_act_entry),
2728 ICE_NONDMA_TO_NONDMA);
2731 enum ice_acl_entry_prior prior;
2735 keys = (u8 *)e->entry;
2736 inverts = keys + (e->entry_sz / 2);
2737 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2739 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2740 inverts, acts, e->acts_cnt,
2745 e->scen_entry_idx = entry_idx;
2746 LIST_ADD(&e->l_entry, &prof->entries);
2748 if (do_chg_action) {
2749 /* For the action memory info, update the SW's copy of
2750 * exist entry with e's action memory info
2752 ice_free(hw, exist->acts);
2753 exist->acts_cnt = e->acts_cnt;
2754 exist->acts = (struct ice_flow_action *)
2755 ice_calloc(hw, exist->acts_cnt,
2756 sizeof(struct ice_flow_action));
2759 status = ICE_ERR_NO_MEMORY;
2763 ice_memcpy(exist->acts, e->acts,
2764 sizeof(struct ice_flow_action) * e->acts_cnt,
2765 ICE_NONDMA_TO_NONDMA);
2767 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2769 exist->scen_entry_idx);
2774 if (do_chg_rng_chk) {
2775 /* In this case, we want to update the range checker
2776 * information of the exist entry
2778 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2784 /* As we don't add the new entry to our SW DB, deallocate its
2785 * memories, and return the exist entry to the caller
2787 ice_dealloc_flow_entry(hw, e);
2798 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2799 * @hw: pointer to the hardware structure
2800 * @prof: pointer to flow profile
2801 * @e: double pointer to the flow entry
2803 static enum ice_status
2804 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2805 struct ice_flow_entry **e)
2807 enum ice_status status;
2809 ice_acquire_lock(&prof->entries_lock);
2810 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2811 ice_release_lock(&prof->entries_lock);
2817 * ice_flow_add_entry - Add a flow entry
2818 * @hw: pointer to the HW struct
2819 * @blk: classification stage
2820 * @prof_id: ID of the profile to add a new flow entry to
2821 * @entry_id: unique ID to identify this flow entry
2822 * @vsi_handle: software VSI handle for the flow entry
2823 * @prio: priority of the flow entry
2824 * @data: pointer to a data buffer containing flow entry's match values/masks
2825 * @acts: arrays of actions to be performed on a match
2826 * @acts_cnt: number of actions
2827 * @entry_h: pointer to buffer that receives the new flow entry's handle
2830 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2831 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2832 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2835 struct ice_flow_entry *e = NULL;
2836 struct ice_flow_prof *prof;
2837 enum ice_status status = ICE_SUCCESS;
2839 /* ACL entries must indicate an action */
2840 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2841 return ICE_ERR_PARAM;
2843 /* No flow entry data is expected for RSS */
2844 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2845 return ICE_ERR_BAD_PTR;
2847 if (!ice_is_vsi_valid(hw, vsi_handle))
2848 return ICE_ERR_PARAM;
2850 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2852 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2854 status = ICE_ERR_DOES_NOT_EXIST;
2856 /* Allocate memory for the entry being added and associate
2857 * the VSI to the found flow profile
2859 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2861 status = ICE_ERR_NO_MEMORY;
2863 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2866 ice_release_lock(&hw->fl_profs_locks[blk]);
2871 e->vsi_handle = vsi_handle;
2880 /* ACL will handle the entry management */
2881 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2886 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2894 status = ICE_ERR_NOT_IMPL;
2898 if (blk != ICE_BLK_ACL) {
2899 /* ACL will handle the entry management */
2900 ice_acquire_lock(&prof->entries_lock);
2901 LIST_ADD(&e->l_entry, &prof->entries);
2902 ice_release_lock(&prof->entries_lock);
2905 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2910 ice_free(hw, e->entry);
2918 * ice_flow_rem_entry - Remove a flow entry
2919 * @hw: pointer to the HW struct
2920 * @blk: classification stage
2921 * @entry_h: handle to the flow entry to be removed
2923 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2926 struct ice_flow_entry *entry;
2927 struct ice_flow_prof *prof;
2928 enum ice_status status = ICE_SUCCESS;
2930 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2931 return ICE_ERR_PARAM;
2933 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2935 /* Retain the pointer to the flow profile as the entry will be freed */
2939 ice_acquire_lock(&prof->entries_lock);
2940 status = ice_flow_rem_entry_sync(hw, blk, entry);
2941 ice_release_lock(&prof->entries_lock);
2948 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2949 * @seg: packet segment the field being set belongs to
2950 * @fld: field to be set
2951 * @field_type: type of the field
2952 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2953 * entry's input buffer
2954 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2956 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2957 * entry's input buffer
2959 * This helper function stores information of a field being matched, including
2960 * the type of the field and the locations of the value to match, the mask, and
2961 * and the upper-bound value in the start of the input buffer for a flow entry.
2962 * This function should only be used for fixed-size data structures.
2964 * This function also opportunistically determines the protocol headers to be
2965 * present based on the fields being set. Some fields cannot be used alone to
2966 * determine the protocol headers present. Sometimes, fields for particular
2967 * protocol headers are not matched. In those cases, the protocol headers
2968 * must be explicitly set.
2971 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2972 enum ice_flow_fld_match_type field_type, u16 val_loc,
2973 u16 mask_loc, u16 last_loc)
2975 u64 bit = BIT_ULL(fld);
2978 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2981 seg->fields[fld].type = field_type;
2982 seg->fields[fld].src.val = val_loc;
2983 seg->fields[fld].src.mask = mask_loc;
2984 seg->fields[fld].src.last = last_loc;
2986 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2990 * ice_flow_set_fld - specifies locations of field from entry's input buffer
2991 * @seg: packet segment the field being set belongs to
2992 * @fld: field to be set
2993 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2994 * entry's input buffer
2995 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2997 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2998 * entry's input buffer
2999 * @range: indicate if field being matched is to be in a range
3001 * This function specifies the locations, in the form of byte offsets from the
3002 * start of the input buffer for a flow entry, from where the value to match,
3003 * the mask value, and upper value can be extracted. These locations are then
3004 * stored in the flow profile. When adding a flow entry associated with the
3005 * flow profile, these locations will be used to quickly extract the values and
3006 * create the content of a match entry. This function should only be used for
3007 * fixed-size data structures.
3010 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3011 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3013 enum ice_flow_fld_match_type t = range ?
3014 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3016 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3020 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3021 * @seg: packet segment the field being set belongs to
3022 * @fld: field to be set
3023 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3024 * entry's input buffer
3025 * @pref_loc: location of prefix value from entry's input buffer
3026 * @pref_sz: size of the location holding the prefix value
3028 * This function specifies the locations, in the form of byte offsets from the
3029 * start of the input buffer for a flow entry, from where the value to match
3030 * and the IPv4 prefix value can be extracted. These locations are then stored
3031 * in the flow profile. When adding flow entries to the associated flow profile,
3032 * these locations can be used to quickly extract the values to create the
3033 * content of a match entry. This function should only be used for fixed-size
3037 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3038 u16 val_loc, u16 pref_loc, u8 pref_sz)
3040 /* For this type of field, the "mask" location is for the prefix value's
3041 * location and the "last" location is for the size of the location of
3044 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3045 pref_loc, (u16)pref_sz);
3049 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3050 * @seg: packet segment the field being set belongs to
3051 * @off: offset of the raw field from the beginning of the segment in bytes
3052 * @len: length of the raw pattern to be matched
3053 * @val_loc: location of the value to match from entry's input buffer
3054 * @mask_loc: location of mask value from entry's input buffer
3056 * This function specifies the offset of the raw field to be match from the
3057 * beginning of the specified packet segment, and the locations, in the form of
3058 * byte offsets from the start of the input buffer for a flow entry, from where
3059 * the value to match and the mask value to be extracted. These locations are
3060 * then stored in the flow profile. When adding flow entries to the associated
3061 * flow profile, these locations can be used to quickly extract the values to
3062 * create the content of a match entry. This function should only be used for
3063 * fixed-size data structures.
3066 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3067 u16 val_loc, u16 mask_loc)
3069 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3070 seg->raws[seg->raws_cnt].off = off;
3071 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3072 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3073 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3074 /* The "last" field is used to store the length of the field */
3075 seg->raws[seg->raws_cnt].info.src.last = len;
3078 /* Overflows of "raws" will be handled as an error condition later in
3079 * the flow when this information is processed.
/* Sets of protocol headers that are valid for RSS hashing, grouped by layer */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3100 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3101 * @segs: pointer to the flow field segment(s)
3102 * @hash_fields: fields to be hashed on for the segment(s)
3103 * @flow_hdr: protocol header fields within a packet segment
3105 * Helper function to extract fields from hash bitmap and use flow
3106 * header value to set flow field segment for further use in flow
3107 * profile entry or removal.
3109 static enum ice_status
3110 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3113 u64 val = hash_fields;
3116 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3117 u64 bit = BIT_ULL(i);
3120 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3121 ICE_FLOW_FLD_OFF_INVAL,
3122 ICE_FLOW_FLD_OFF_INVAL,
3123 ICE_FLOW_FLD_OFF_INVAL, false);
3127 ICE_FLOW_SET_HDRS(segs, flow_hdr);
3129 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3130 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3131 return ICE_ERR_PARAM;
3133 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3134 if (val && !ice_is_pow2(val))
3137 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3138 if (val && !ice_is_pow2(val))
3145 * ice_rem_vsi_rss_list - remove VSI from RSS list
3146 * @hw: pointer to the hardware structure
3147 * @vsi_handle: software VSI handle
3149 * Remove the VSI from all RSS configurations in the list.
3151 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3153 struct ice_rss_cfg *r, *tmp;
3155 if (LIST_EMPTY(&hw->rss_list_head))
3158 ice_acquire_lock(&hw->rss_locks);
3159 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3160 ice_rss_cfg, l_entry)
3161 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3162 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3163 LIST_DEL(&r->l_entry);
3166 ice_release_lock(&hw->rss_locks);
3170 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3171 * @hw: pointer to the hardware structure
3172 * @vsi_handle: software VSI handle
3174 * This function will iterate through all flow profiles and disassociate
3175 * the VSI from that profile. If the flow profile has no VSIs it will
3178 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3180 const enum ice_block blk = ICE_BLK_RSS;
3181 struct ice_flow_prof *p, *t;
3182 enum ice_status status = ICE_SUCCESS;
3184 if (!ice_is_vsi_valid(hw, vsi_handle))
3185 return ICE_ERR_PARAM;
3187 if (LIST_EMPTY(&hw->fl_profs[blk]))
3190 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3191 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3193 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3194 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3198 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3199 status = ice_flow_rem_prof_sync(hw, blk, p);
3204 ice_release_lock(&hw->fl_profs_locks[blk]);
3210 * ice_rem_rss_list - remove RSS configuration from list
3211 * @hw: pointer to the hardware structure
3212 * @vsi_handle: software VSI handle
3213 * @prof: pointer to flow profile
3215 * Assumption: lock has already been acquired for RSS list
3218 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3220 struct ice_rss_cfg *r, *tmp;
3222 /* Search for RSS hash fields associated to the VSI that match the
3223 * hash configurations associated to the flow profile. If found
3224 * remove from the RSS entry list of the VSI context and delete entry.
3226 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3227 ice_rss_cfg, l_entry)
3228 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3229 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3230 ice_clear_bit(vsi_handle, r->vsis);
3231 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3232 LIST_DEL(&r->l_entry);
3240 * ice_add_rss_list - add RSS configuration to list
3241 * @hw: pointer to the hardware structure
3242 * @vsi_handle: software VSI handle
3243 * @prof: pointer to flow profile
3245 * Assumption: lock has already been acquired for RSS list
3247 static enum ice_status
3248 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3250 struct ice_rss_cfg *r, *rss_cfg;
3252 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3253 ice_rss_cfg, l_entry)
3254 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3255 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3256 ice_set_bit(vsi_handle, r->vsis);
3260 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3262 return ICE_ERR_NO_MEMORY;
3264 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3265 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3266 rss_cfg->symm = prof->cfg.symm;
3267 ice_set_bit(vsi_handle, rss_cfg->vsis);
3269 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Shift/mask pairs used to pack the components of an RSS flow profile ID
 * into a single u64 (see ICE_FLOW_GEN_PROFID below).
 */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts passed to the add/remove sync helpers: one segment
 * targets the outer headers, two segments the inner (tunneled) headers.
 */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3295 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3297 u32 s = ((src % 4) << 3); /* byte shift */
3298 u32 v = dst | 0x80; /* value to program */
3299 u8 i = src / 4; /* register index */
3302 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3303 reg = (reg & ~(0xff << s)) | (v << s);
3304 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3308 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3311 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3314 for (i = 0; i < len; i++) {
3315 ice_rss_config_xor_word(hw, prof_id,
3316 /* Yes, field vector in GLQF_HSYMM and
3317 * GLQF_HINSET is inversed!
3319 fv_last_word - (src + i),
3320 fv_last_word - (dst + i));
3321 ice_rss_config_xor_word(hw, prof_id,
3322 fv_last_word - (dst + i),
3323 fv_last_word - (src + i));
3328 ice_rss_update_symm(struct ice_hw *hw,
3329 struct ice_flow_prof *prof)
3331 struct ice_prof_map *map;
3334 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3335 prof_id = map->prof_id;
3337 /* clear to default */
3338 for (m = 0; m < 6; m++)
3339 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3340 if (prof->cfg.symm) {
3341 struct ice_flow_seg_info *seg =
3342 &prof->segs[prof->segs_cnt - 1];
3344 struct ice_flow_seg_xtrct *ipv4_src =
3345 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3346 struct ice_flow_seg_xtrct *ipv4_dst =
3347 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3348 struct ice_flow_seg_xtrct *ipv6_src =
3349 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3350 struct ice_flow_seg_xtrct *ipv6_dst =
3351 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3353 struct ice_flow_seg_xtrct *tcp_src =
3354 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3355 struct ice_flow_seg_xtrct *tcp_dst =
3356 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3358 struct ice_flow_seg_xtrct *udp_src =
3359 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3360 struct ice_flow_seg_xtrct *udp_dst =
3361 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3363 struct ice_flow_seg_xtrct *sctp_src =
3364 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3365 struct ice_flow_seg_xtrct *sctp_dst =
3366 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3369 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3370 ice_rss_config_xor(hw, prof_id,
3371 ipv4_src->idx, ipv4_dst->idx, 2);
3374 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3375 ice_rss_config_xor(hw, prof_id,
3376 ipv6_src->idx, ipv6_dst->idx, 8);
3379 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3380 ice_rss_config_xor(hw, prof_id,
3381 tcp_src->idx, tcp_dst->idx, 1);
3384 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3385 ice_rss_config_xor(hw, prof_id,
3386 udp_src->idx, udp_dst->idx, 1);
3389 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3390 ice_rss_config_xor(hw, prof_id,
3391 sctp_src->idx, sctp_dst->idx, 1);
3396 * ice_add_rss_cfg_sync - add an RSS configuration
3397 * @hw: pointer to the hardware structure
3398 * @vsi_handle: software VSI handle
3399 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3400 * @addl_hdrs: protocol header fields
3401 * @segs_cnt: packet segment count
3402 * @symm: symmetric hash enable/disable
3404 * Assumption: lock has already been acquired for RSS list
3406 static enum ice_status
3407 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3408 u32 addl_hdrs, u8 segs_cnt, bool symm)
3410 const enum ice_block blk = ICE_BLK_RSS;
3411 struct ice_flow_prof *prof = NULL;
3412 struct ice_flow_seg_info *segs;
3413 enum ice_status status;
3415 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3416 return ICE_ERR_PARAM;
3418 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3421 return ICE_ERR_NO_MEMORY;
3423 /* Construct the packet segment info from the hashed fields */
3424 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3429 /* Search for a flow profile that has matching headers, hash fields
3430 * and has the input VSI associated to it. If found, no further
3431 * operations required and exit.
3433 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3435 ICE_FLOW_FIND_PROF_CHK_FLDS |
3436 ICE_FLOW_FIND_PROF_CHK_VSI);
3438 if (prof->cfg.symm == symm)
3440 prof->cfg.symm = symm;
3444 /* Check if a flow profile exists with the same protocol headers and
3445 * associated with the input VSI. If so disassociate the VSI from
3446 * this profile. The VSI will be added to a new profile created with
3447 * the protocol header and new hash field configuration.
3449 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3450 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3452 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3454 ice_rem_rss_list(hw, vsi_handle, prof);
3458 /* Remove profile if it has no VSIs associated */
3459 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3460 status = ice_flow_rem_prof(hw, blk, prof->id);
3466 /* Search for a profile that has same match fields only. If this
3467 * exists then associate the VSI to this profile.
3469 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3471 ICE_FLOW_FIND_PROF_CHK_FLDS);
3473 if (prof->cfg.symm == symm) {
3474 status = ice_flow_assoc_prof(hw, blk, prof,
3477 status = ice_add_rss_list(hw, vsi_handle,
3480 /* if a profile exist but with different symmetric
3481 * requirement, just return error.
3483 status = ICE_ERR_NOT_SUPPORTED;
3488 /* Create a new flow profile with generated profile and packet
3489 * segment information.
3491 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3492 ICE_FLOW_GEN_PROFID(hashed_flds,
3493 segs[segs_cnt - 1].hdrs,
3495 segs, segs_cnt, NULL, 0, &prof);
3499 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3500 /* If association to a new flow profile failed then this profile can
3504 ice_flow_rem_prof(hw, blk, prof->id);
3508 status = ice_add_rss_list(hw, vsi_handle, prof);
3510 prof->cfg.symm = symm;
3513 ice_rss_update_symm(hw, prof);
3521 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3522 * @hw: pointer to the hardware structure
3523 * @vsi_handle: software VSI handle
3524 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3525 * @addl_hdrs: protocol header fields
3526 * @symm: symmetric hash enable/disable
3528 * This function will generate a flow profile based on fields associated with
3529 * the input fields to hash on, the flow type and use the VSI number to add
3530 * a flow entry to the profile.
3533 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3534 u32 addl_hdrs, bool symm)
3536 enum ice_status status;
3538 if (hashed_flds == ICE_HASH_INVALID ||
3539 !ice_is_vsi_valid(hw, vsi_handle))
3540 return ICE_ERR_PARAM;
3542 ice_acquire_lock(&hw->rss_locks);
3543 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3544 ICE_RSS_OUTER_HEADERS, symm);
3546 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3547 addl_hdrs, ICE_RSS_INNER_HEADERS,
3549 ice_release_lock(&hw->rss_locks);
3555 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3556 * @hw: pointer to the hardware structure
3557 * @vsi_handle: software VSI handle
3558 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3559 * @addl_hdrs: Protocol header fields within a packet segment
3560 * @segs_cnt: packet segment count
3562 * Assumption: lock has already been acquired for RSS list
3564 static enum ice_status
3565 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3566 u32 addl_hdrs, u8 segs_cnt)
3568 const enum ice_block blk = ICE_BLK_RSS;
3569 struct ice_flow_seg_info *segs;
3570 struct ice_flow_prof *prof;
3571 enum ice_status status;
3573 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3576 return ICE_ERR_NO_MEMORY;
3578 /* Construct the packet segment info from the hashed fields */
3579 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3584 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3586 ICE_FLOW_FIND_PROF_CHK_FLDS);
3588 status = ICE_ERR_DOES_NOT_EXIST;
3592 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3596 /* Remove RSS configuration from VSI context before deleting
3599 ice_rem_rss_list(hw, vsi_handle, prof);
3601 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3602 status = ice_flow_rem_prof(hw, blk, prof->id);
3610 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3611 * @hw: pointer to the hardware structure
3612 * @vsi_handle: software VSI handle
3613 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3614 * @addl_hdrs: Protocol header fields within a packet segment
3616 * This function will lookup the flow profile based on the input
3617 * hash field bitmap, iterate through the profile entry list of
3618 * that profile and find entry associated with input VSI to be
3619 * removed. Calls are made to underlying flow apis which will in
3620 * turn build or update buffers for RSS XLT1 section.
3623 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3626 enum ice_status status;
3628 if (hashed_flds == ICE_HASH_INVALID ||
3629 !ice_is_vsi_valid(hw, vsi_handle))
3630 return ICE_ERR_PARAM;
3632 ice_acquire_lock(&hw->rss_locks);
3633 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3634 ICE_RSS_OUTER_HEADERS);
3636 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3637 addl_hdrs, ICE_RSS_INNER_HEADERS);
3638 ice_release_lock(&hw->rss_locks);
3644 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3645 * @hw: pointer to the hardware structure
3646 * @vsi_handle: software VSI handle
3648 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3650 enum ice_status status = ICE_SUCCESS;
3651 struct ice_rss_cfg *r;
3653 if (!ice_is_vsi_valid(hw, vsi_handle))
3654 return ICE_ERR_PARAM;
3656 ice_acquire_lock(&hw->rss_locks);
3657 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3658 ice_rss_cfg, l_entry) {
3659 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3660 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3663 ICE_RSS_OUTER_HEADERS,
3667 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3670 ICE_RSS_INNER_HEADERS,
3676 ice_release_lock(&hw->rss_locks);
3682 * ice_get_rss_cfg - returns hashed fields for the given header types
3683 * @hw: pointer to the hardware structure
3684 * @vsi_handle: software VSI handle
3685 * @hdrs: protocol header type
3687 * This function will return the match fields of the first instance of flow
3688 * profile having the given header types and containing input VSI
3690 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3692 struct ice_rss_cfg *r, *rss_cfg = NULL;
3694 /* verify if the protocol header is non zero and VSI is valid */
3695 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3696 return ICE_HASH_INVALID;
3698 ice_acquire_lock(&hw->rss_locks);
3699 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3700 ice_rss_cfg, l_entry)
3701 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3702 r->packet_hdr == hdrs) {
3706 ice_release_lock(&hw->rss_locks);
3708 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;