1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_common.h"
/* Size of known protocol header fields
 * All sizes below are in BYTES; the ICE_FLOW_FLD_INFO macros convert them
 * to bit counts via BITS_PER_BYTE when the field table is built.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
#define ICE_FLOW_FLD_SZ_VLAN 2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
#define ICE_FLOW_FLD_SZ_IP_DSCP 1
#define ICE_FLOW_FLD_SZ_IP_TTL 1
#define ICE_FLOW_FLD_SZ_IP_PROT 1
#define ICE_FLOW_FLD_SZ_PORT 2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
#define ICE_FLOW_FLD_SZ_ARP_OPER 2
#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
#define ICE_FLOW_FLD_SZ_GTP_TEID 4
#define ICE_FLOW_FLD_SZ_GTP_QFI 2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
enum ice_flow_seg_hdr hdr; /* Header(s) this field belongs to */
s16 off; /* Offset from start of a protocol header, in bits */
u16 size; /* Size of fields in bits */
u16 mask; /* 16-bit mask for field */

/* Build an ice_flow_field_info initializer from byte offsets/sizes.
 * NOTE(review): the macro bodies below appear truncated in this excerpt
 * (no ".hdr = _hdr" assignment or closing brace is visible) -- verify
 * against the full source before relying on them.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
.off = (_offset_bytes) * BITS_PER_BYTE, \
.size = (_size_bytes) * BITS_PER_BYTE, \

/* Same as ICE_FLOW_FLD_INFO but with an explicit 16-bit field mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
.off = (_offset_bytes) * BITS_PER_BYTE, \
.size = (_size_bytes) * BITS_PER_BYTE, \
/* Table containing properties of supported protocol header fields,
 * indexed by enum ice_flow_field (see the per-entry comments).
 * Offsets passed to the macros are byte offsets from the start of the
 * owning protocol header; the macros convert them to bits.
 * NOTE(review): the mask arguments of the two DSCP _MSK entries are not
 * visible in this excerpt -- confirm against the full source.
 */
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
/* ICE_FLOW_FIELD_IDX_ETH_DA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
/* ICE_FLOW_FIELD_IDX_ETH_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
/* ICE_FLOW_FIELD_IDX_S_VLAN */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
/* ICE_FLOW_FIELD_IDX_C_VLAN */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
/* ICE_FLOW_FIELD_IDX_IPV4_TTL
 * TTL/PROT pairs use ICE_FLOW_SEG_HDR_NONE and share one 16-bit word;
 * the masks select which byte of the word each field owns.
 */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
/* ICE_FLOW_FIELD_IDX_IPV4_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
/* ICE_FLOW_FIELD_IDX_IPV4_DA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
/* ICE_FLOW_FIELD_IDX_IPV6_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
/* ICE_FLOW_FIELD_IDX_IPV6_DA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
/* ICE_FLOW_FIELD_IDX_ARP_SIP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
/* ICE_FLOW_FIELD_IDX_ARP_DIP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
/* ICE_FLOW_FIELD_IDX_ARP_SHA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
/* ICE_FLOW_FIELD_IDX_ARP_DHA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
/* ICE_FLOW_FIELD_IDX_ARP_OP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
ICE_FLOW_FLD_SZ_GTP_TEID),
/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
ICE_FLOW_FLD_SZ_GTP_TEID),
/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
ICE_FLOW_FLD_SZ_GTP_TEID),
/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
ICE_FLOW_FLD_SZ_GTP_TEID),
/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
ICE_FLOW_FLD_SZ_GTP_TEID),
/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * These u32 arrays are consumed as ice_bitmap_t bitmaps of length
 * ICE_FLOW_PTYPE_MAX by ice_flow_proc_seg_hdrs(), which ANDs them into
 * params->ptypes to narrow the set of hardware packet types matching a
 * segment's headers.
 * NOTE(review): the closing "};" lines of these arrays are not visible in
 * this excerpt -- they appear to have been elided from the view.
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
0x0000077E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
0x0000077E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x0003000F, 0x000FC000, 0x03E0F800, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
0x0000000E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x001FF800, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
0x00000770, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
0x00000800, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 * (Consumed as ptype bitmaps by ice_flow_proc_seg_hdrs, as above.)
 */
static const u32 ice_ptypes_udp_il[] = {
0x81000000, 0x20204040, 0x04000010, 0x80810102,
0x00000040, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00410000, 0x10842000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
0x04000000, 0x80810102, 0x10000040, 0x02040408,
0x00000102, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00820000, 0x21084000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
0x08000000, 0x01020204, 0x20000081, 0x04080810,
0x00000204, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x01040000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
0x10000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
0x00000000, 0x02040408, 0x40000102, 0x08101020,
0x00000408, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x42108000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
0x0000017E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last MAC header
 * (currently all-zero: no ptypes selected)
 */
static const u32 ice_ptypes_mac_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPC (ptype bitmap, as above) */
static const u32 ice_ptypes_gtpc[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000180, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000060, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPU
 *
 * Per-ptype attribute tables: each entry pairs a GTPU packet type with the
 * GTP attribute flag that ice_flow_proc_seg_hdrs() stores in params->attr /
 * params->attr_cnt for the matching segment-header flag (EH / downlink /
 * uplink).
 */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
/* Same ptype set, tagged with the downlink attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
/* Same ptype set, tagged with the uplink attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
/* Packet types for GTPU (ptype bitmap; shared by the DWN/UP/EH/IP
 * segment-header cases in ice_flow_proc_seg_hdrs)
 */
static const u32 ice_ptypes_gtpu[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
u16 entry_length; /* # of bytes formatted entry will require */
struct ice_flow_prof *prof; /* Profile being built */
/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
 * This will give us the direction flags.
 */
struct ice_fv_word es[ICE_MAX_FV_WORDS]; /* Extraction sequence entries */
/* attributes can be used to add attributes to a particular PTYPE */
const struct ice_ptype_attributes *attr;
u16 mask[ICE_MAX_FV_WORDS]; /* Per-entry 16-bit field masks */
ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); /* Matching ptype set */
/* NOTE(review): other members referenced later (blk, es_cnt, attr_cnt)
 * are not visible in this excerpt -- verify against the full source.
 */

/* Segment-header flags the RSS code treats as inner/tunnel headers */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU)
/* Groupings of segment-header flags by protocol layer */
#define ICE_FLOW_SEG_HDRS_L2_MASK \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
ICE_FLOW_SEG_HDR_SCTP)
/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 *
 * Rejects (ICE_ERR_PARAM) any segment that specifies more than one L3
 * header or more than one L4 header; the ice_is_pow2 check verifies that
 * at most a single bit of the masked header flags is set.
 */
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
for (i = 0; i < segs_cnt; i++) {
/* Multiple L3 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
!ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
return ICE_ERR_PARAM;
/* Multiple L4 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
!ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes.
 * Used by ice_flow_calc_seg_sz() to total up a segment's header length.
 */
#define ICE_FLOW_PROT_HDR_SZ_MAC 14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
#define ICE_FLOW_PROT_HDR_SZ_ARP 28
#define ICE_FLOW_PROT_HDR_SZ_ICMP 8
#define ICE_FLOW_PROT_HDR_SZ_TCP 20
#define ICE_FLOW_PROT_HDR_SZ_UDP 8
#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 *
 * Sums the fixed header sizes (ICE_FLOW_PROT_HDR_SZ_*) for the headers
 * present in the segment: L2 (MAC, optionally +VLAN), then one L3, then
 * one L4.
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2 headers: MAC, widened to MAC+VLAN when a VLAN header is present */
sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3 headers (at most one per segment, per ice_flow_val_hdrs) */
if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
sz += ICE_FLOW_PROT_HDR_SZ_ARP;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
/* A L3 header is required if L4 is specified */
/* L4 headers (at most one per segment) */
if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
sz += ICE_FLOW_PROT_HDR_SZ_TCP;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
sz += ICE_FLOW_PROT_HDR_SZ_UDP;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 * It starts from the all-ones ptype bitmap and ANDs in the per-header
 * bitmaps (ice_ptypes_*) for every header flag of every segment, choosing
 * the "outer" table for segment 0 and the "inner" table otherwise.  For
 * the GTPU variants it also records the matching attribute table in
 * params->attr / params->attr_cnt.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
struct ice_flow_prof *prof;
/* Start with every ptype eligible; each header AND below narrows it */
ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
for (i = 0; i < params->prof->segs_cnt; i++) {
const ice_bitmap_t *src;
hdrs = prof->segs[i].hdrs;
if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
/* outer MAC table for segment 0, inner MAC table otherwise */
src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
(const ice_bitmap_t *)ice_ptypes_mac_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
/* MAC VLAN is only recognized on inner segments */
if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
/* ARP is only recognized as an outermost/first header */
if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
ice_and_bitmap(params->ptypes, params->ptypes,
(const ice_bitmap_t *)ice_ptypes_arp_of,
if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
src = (const ice_bitmap_t *)ice_ptypes_pppoe;
ice_and_bitmap(params->ptypes, params->ptypes, src,
/* L3: IPv4 and IPv6 are mutually exclusive per segment */
if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
(const ice_bitmap_t *)ice_ptypes_ipv4_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
(const ice_bitmap_t *)ice_ptypes_ipv6_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
/* L4 / tunnel headers: one branch per segment */
if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
(const ice_bitmap_t *)ice_ptypes_icmp_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
} else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
src = (const ice_bitmap_t *)ice_ptypes_udp_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
ice_and_bitmap(params->ptypes, params->ptypes,
(const ice_bitmap_t *)ice_ptypes_tcp_il,
} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
src = (const ice_bitmap_t *)ice_ptypes_gre_of;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
src = (const ice_bitmap_t *)ice_ptypes_gtpc;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
src = (const ice_bitmap_t *)ice_ptypes_gtpu;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
/* Attributes for GTP packet with downlink */
params->attr = ice_attr_gtpu_down;
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
src = (const ice_bitmap_t *)ice_ptypes_gtpu;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
/* Attributes for GTP packet with uplink */
params->attr = ice_attr_gtpu_up;
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
src = (const ice_bitmap_t *)ice_ptypes_gtpu;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
/* Attributes for GTP packet with Extension Header */
params->attr = ice_attr_gtpu_eh;
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
/* Plain GTPU (no direction/EH attribute recorded) */
src = (const ice_bitmap_t *)ice_ptypes_gtpu;
ice_and_bitmap(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
/**
 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
 *
 * This function will allocate an extraction sequence entries for a DWORD size
 * chunk of the packet flags.
 */
static enum ice_status
ice_flow_xtract_pkt_flags(struct ice_hw *hw,
struct ice_flow_prof_params *params,
enum ice_flex_mdid_pkt_flags flags)
u8 fv_words = hw->blk[params->blk].es.fvw;
/* Make sure the number of extraction sequence entries required does not
 * exceed the block's capacity.
 */
if (params->es_cnt >= fv_words)
return ICE_ERR_MAX_LIMIT;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
idx = fv_words - params->es_cnt - 1;
/* NOTE(review): an "else" line appears elided between the two idx
 * assignments in this excerpt -- verify against the full source.
 */
idx = params->es_cnt;
/* Metadata entry: extract the packet-flags MDID at offset "flags" */
params->es[idx].prot_id = ICE_PROT_META_ID;
params->es[idx].off = flags;
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bitfield of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld, u64 match)
enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
u8 fv_words = hw->blk[params->blk].es.fvw;
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
flds = params->prof->segs[seg].fields;
/* Map the field ID to a hardware protocol ID; outer headers (seg 0)
 * and inner headers use different protocol IDs.
 */
case ICE_FLOW_FIELD_IDX_ETH_DA:
case ICE_FLOW_FIELD_IDX_ETH_SA:
case ICE_FLOW_FIELD_IDX_S_VLAN:
case ICE_FLOW_FIELD_IDX_C_VLAN:
prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
case ICE_FLOW_FIELD_IDX_ETH_TYPE:
prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
case ICE_FLOW_FIELD_IDX_IPV4_TTL:
case ICE_FLOW_FIELD_IDX_IPV4_PROT:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
/* TTL and PROT share the same extraction seq. entry.
 * Each is considered a sibling to the other in terms of sharing
 * the same extraction sequence entry.
 */
if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
/* If the sibling field is also included, that field's
 * mask needs to be included.
 */
if (match & BIT(sib))
sib_mask = ice_flds_info[sib].mask;
case ICE_FLOW_FIELD_IDX_IPV6_TTL:
case ICE_FLOW_FIELD_IDX_IPV6_PROT:
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
/* TTL and PROT share the same extraction seq. entry.
 * Each is considered a sibling to the other in terms of sharing
 * the same extraction sequence entry.
 */
if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
/* If the sibling field is also included, that field's
 * mask needs to be included.
 */
if (match & BIT(sib))
sib_mask = ice_flds_info[sib].mask;
case ICE_FLOW_FIELD_IDX_IPV4_SA:
case ICE_FLOW_FIELD_IDX_IPV4_DA:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
case ICE_FLOW_FIELD_IDX_IPV6_SA:
case ICE_FLOW_FIELD_IDX_IPV6_DA:
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
prot_id = ICE_PROT_TCP_IL;
case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
prot_id = ICE_PROT_UDP_IL_OR_S;
case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
prot_id = ICE_PROT_SCTP_IL;
case ICE_FLOW_FIELD_IDX_GTPC_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
/* GTP is accessed through UDP OF protocol */
prot_id = ICE_PROT_UDP_OF;
case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
prot_id = ICE_PROT_PPPOE;
case ICE_FLOW_FIELD_IDX_ARP_SIP:
case ICE_FLOW_FIELD_IDX_ARP_DIP:
case ICE_FLOW_FIELD_IDX_ARP_SHA:
case ICE_FLOW_FIELD_IDX_ARP_DHA:
case ICE_FLOW_FIELD_IDX_ARP_OP:
prot_id = ICE_PROT_ARP_OF;
case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
case ICE_FLOW_FIELD_IDX_ICMP_CODE:
/* ICMP type and code share the same extraction seq. entry */
prot_id = (params->prof->segs[seg].hdrs &
ICE_FLOW_SEG_HDR_IPV4) ?
ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
ICE_FLOW_FIELD_IDX_ICMP_CODE :
ICE_FLOW_FIELD_IDX_ICMP_TYPE;
case ICE_FLOW_FIELD_IDX_GRE_KEYID:
prot_id = ICE_PROT_GRE_OF;
/* Unknown field ID */
return ICE_ERR_NOT_IMPL;
/* Each extraction sequence entry is a word in size, and extracts a
 * word-aligned offset from a protocol header.
 */
ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
flds[fld].xtrct.prot_id = prot_id;
/* Word-aligned byte offset of the first extracted word */
flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
ICE_FLOW_FV_EXTRACT_SZ;
/* Bit displacement of the field within that word */
flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
flds[fld].xtrct.idx = params->es_cnt;
flds[fld].xtrct.mask = ice_flds_info[fld].mask;
/* Adjust the next field-entry index after accommodating the number of
 * entries this field consumes
 */
cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
ice_flds_info[fld].size, ese_bits);
/* Fill in the extraction sequence entries needed for this field */
off = flds[fld].xtrct.off;
mask = flds[fld].xtrct.mask;
for (i = 0; i < cnt; i++) {
/* Only consume an extraction sequence entry if there is no
 * sibling field associated with this field or the sibling entry
 * already extracts the word shared with this field.
 */
if (sib == ICE_FLOW_FIELD_IDX_MAX ||
flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
flds[sib].xtrct.off != off) {
/* Make sure the number of extraction sequence required
 * does not exceed the block's capability
 */
if (params->es_cnt >= fv_words)
return ICE_ERR_MAX_LIMIT;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
idx = fv_words - params->es_cnt - 1;
idx = params->es_cnt;
params->es[idx].prot_id = prot_id;
params->es[idx].off = off;
/* Combine this field's mask with the sibling's so the
 * shared word covers both fields.
 */
params->mask[idx] = mask | sib_mask;
/* Advance to the next word of the field */
off += ICE_FLOW_FV_EXTRACT_SZ;
900 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
901 * @hw: pointer to the HW struct
902 * @params: information about the flow to be processed
903 * @seg: index of packet segment whose raw fields are to be be extracted
905 static enum ice_status
906 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
913 if (!params->prof->segs[seg].raws_cnt)
916 if (params->prof->segs[seg].raws_cnt >
917 ARRAY_SIZE(params->prof->segs[seg].raws))
918 return ICE_ERR_MAX_LIMIT;
920 /* Offsets within the segment headers are not supported */
921 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
923 return ICE_ERR_PARAM;
925 fv_words = hw->blk[params->blk].es.fvw;
927 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
928 struct ice_flow_seg_fld_raw *raw;
931 raw = ¶ms->prof->segs[seg].raws[i];
933 /* Storing extraction information */
934 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
935 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
936 ICE_FLOW_FV_EXTRACT_SZ;
937 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
939 raw->info.xtrct.idx = params->es_cnt;
941 /* Determine the number of field vector entries this raw field
944 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
945 (raw->info.src.last * BITS_PER_BYTE),
946 (ICE_FLOW_FV_EXTRACT_SZ *
948 off = raw->info.xtrct.off;
949 for (j = 0; j < cnt; j++) {
952 /* Make sure the number of extraction sequence required
953 * does not exceed the block's capability
955 if (params->es_cnt >= hw->blk[params->blk].es.count ||
956 params->es_cnt >= ICE_MAX_FV_WORDS)
957 return ICE_ERR_MAX_LIMIT;
959 /* some blocks require a reversed field vector layout */
960 if (hw->blk[params->blk].es.reverse)
961 idx = fv_words - params->es_cnt - 1;
963 idx = params->es_cnt;
965 params->es[idx].prot_id = raw->info.xtrct.prot_id;
966 params->es[idx].off = off;
968 off += ICE_FLOW_FV_EXTRACT_SZ;
976 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
977 * @hw: pointer to the HW struct
978 * @params: information about the flow to be processed
980 * This function iterates through all matched fields in the given segments, and
981 * creates an extraction sequence for the fields.
983 static enum ice_status
984 ice_flow_create_xtrct_seq(struct ice_hw *hw,
985 struct ice_flow_prof_params *params)
987 enum ice_status status = ICE_SUCCESS;
990 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
993 if (params->blk == ICE_BLK_ACL) {
994 status = ice_flow_xtract_pkt_flags(hw, params,
995 ICE_RX_MDID_PKT_FLAGS_15_0);
1000 for (i = 0; i < params->prof->segs_cnt; i++) {
1001 u64 match = params->prof->segs[i].match;
1002 enum ice_flow_field j;
1004 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1005 const u64 bit = BIT_ULL(j);
1008 status = ice_flow_xtract_fld(hw, params, i, j,
1016 /* Process raw matching bytes */
1017 status = ice_flow_xtract_raws(hw, params, i);
1026 * ice_flow_sel_acl_scen - returns the specific scenario
1027 * @hw: pointer to the hardware structure
1028 * @params: information about the flow to be processed
1030 * This function will return the specific scenario based on the
1031 * params passed to it
1033 static enum ice_status
1034 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1036 /* Find the best-fit scenario for the provided match width */
1037 struct ice_acl_scen *cand_scen = NULL, *scen;
1040 return ICE_ERR_DOES_NOT_EXIST;
1042 /* Loop through each scenario and match against the scenario width
1043 * to select the specific scenario
1045 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1046 if (scen->eff_width >= params->entry_length &&
1047 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1050 return ICE_ERR_DOES_NOT_EXIST;
1052 params->prof->cfg.scen = cand_scen;
1058 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1059 * @params: information about the flow to be processed
1061 static enum ice_status
1062 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1064 u16 index, i, range_idx = 0;
1066 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1068 for (i = 0; i < params->prof->segs_cnt; i++) {
1069 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1070 u64 match = seg->match;
1073 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1074 struct ice_flow_fld_info *fld;
1075 const u64 bit = BIT_ULL(j);
1080 fld = &seg->fields[j];
1081 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1083 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1084 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1086 /* Range checking only supported for single
1089 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1091 BITS_PER_BYTE * 2) > 1)
1092 return ICE_ERR_PARAM;
1094 /* Ranges must define low and high values */
1095 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1096 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1097 return ICE_ERR_PARAM;
1099 fld->entry.val = range_idx++;
1101 /* Store adjusted byte-length of field for later
1102 * use, taking into account potential
1103 * non-byte-aligned displacement
1105 fld->entry.last = DIVIDE_AND_ROUND_UP
1106 (ice_flds_info[j].size +
1107 (fld->xtrct.disp % BITS_PER_BYTE),
1109 fld->entry.val = index;
1110 index += fld->entry.last;
1116 for (j = 0; j < seg->raws_cnt; j++) {
1117 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1119 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1120 raw->info.entry.val = index;
1121 raw->info.entry.last = raw->info.src.last;
1122 index += raw->info.entry.last;
1126 /* Currently only support using the byte selection base, which only
1127 * allows for an effective entry size of 30 bytes. Reject anything
1130 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1131 return ICE_ERR_PARAM;
1133 /* Only 8 range checkers per profile, reject anything trying to use
1136 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1137 return ICE_ERR_PARAM;
1139 /* Store # bytes required for entry for later use */
1140 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1146 * ice_flow_proc_segs - process all packet segments associated with a profile
1147 * @hw: pointer to the HW struct
1148 * @params: information about the flow to be processed
1150 static enum ice_status
1151 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1153 enum ice_status status;
1155 status = ice_flow_proc_seg_hdrs(params);
1159 status = ice_flow_create_xtrct_seq(hw, params);
1163 switch (params->blk) {
1165 /* Only header information is provided for RSS configuration.
1166 * No further processing is needed.
1168 status = ICE_SUCCESS;
1171 status = ice_flow_acl_def_entry_frmt(params);
1174 status = ice_flow_sel_acl_scen(hw, params);
1179 status = ICE_SUCCESS;
1183 return ICE_ERR_NOT_IMPL;
1189 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1190 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1191 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1194 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1195 * @hw: pointer to the HW struct
1196 * @blk: classification stage
1197 * @dir: flow direction
1198 * @segs: array of one or more packet segments that describe the flow
1199 * @segs_cnt: number of packet segments provided
1200 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1201 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1203 static struct ice_flow_prof *
1204 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1205 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1206 u8 segs_cnt, u16 vsi_handle, u32 conds)
1208 struct ice_flow_prof *p, *prof = NULL;
1210 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1211 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1212 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1213 segs_cnt && segs_cnt == p->segs_cnt) {
1216 /* Check for profile-VSI association if specified */
1217 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1218 ice_is_vsi_valid(hw, vsi_handle) &&
1219 !ice_is_bit_set(p->vsis, vsi_handle))
1222 /* Protocol headers must be checked. Matched fields are
1223 * checked if specified.
1225 for (i = 0; i < segs_cnt; i++)
1226 if (segs[i].hdrs != p->segs[i].hdrs ||
1227 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1228 segs[i].match != p->segs[i].match))
1231 /* A match is found if all segments are matched */
1232 if (i == segs_cnt) {
1238 ice_release_lock(&hw->fl_profs_locks[blk]);
1244 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1245 * @hw: pointer to the HW struct
1246 * @blk: classification stage
1247 * @dir: flow direction
1248 * @segs: array of one or more packet segments that describe the flow
1249 * @segs_cnt: number of packet segments provided
1252 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1253 struct ice_flow_seg_info *segs, u8 segs_cnt)
1255 struct ice_flow_prof *p;
1257 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1258 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1260 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1264 * ice_flow_find_prof_id - Look up a profile with given profile ID
1265 * @hw: pointer to the HW struct
1266 * @blk: classification stage
1267 * @prof_id: unique ID to identify this flow profile
1269 static struct ice_flow_prof *
1270 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1272 struct ice_flow_prof *p;
1274 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1275 if (p->id == prof_id)
1283 * ice_dealloc_flow_entry - Deallocate flow entry memory
1284 * @hw: pointer to the HW struct
1285 * @entry: flow entry to be removed
1288 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1294 ice_free(hw, entry->entry);
1296 if (entry->range_buf) {
1297 ice_free(hw, entry->range_buf);
1298 entry->range_buf = NULL;
1302 ice_free(hw, entry->acts);
1304 entry->acts_cnt = 0;
1307 ice_free(hw, entry);
1310 #define ICE_ACL_INVALID_SCEN 0x3f
1313 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1314 * @hw: pointer to the hardware structure
1315 * @prof: pointer to flow profile
1316 * @buf: destination buffer function writes partial xtrct sequence to
1318 * returns ICE_SUCCESS if no pf is associated to the given profile
1319 * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1320 * returns other error code for real error
1322 static enum ice_status
1323 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1324 struct ice_aqc_acl_prof_generic_frmt *buf)
1326 enum ice_status status;
1329 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1333 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1337 /* If all pf's associated scenarios are all 0 or all
1338 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1339 * not been configured yet.
1341 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1342 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1343 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1344 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1347 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1348 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1349 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1350 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1351 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1352 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1353 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1354 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1357 return ICE_ERR_IN_USE;
1361 * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1362 * @hw: pointer to the hardware structure
1363 * @acts: array of actions to be performed on a match
1364 * @acts_cnt: number of actions
1366 static enum ice_status
1367 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1372 for (i = 0; i < acts_cnt; i++) {
1373 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1374 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1375 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1376 struct ice_acl_cntrs cntrs;
1377 enum ice_status status;
1379 cntrs.bank = 0; /* Only bank0 for the moment */
1381 LE16_TO_CPU(acts[i].data.acl_act.value);
1383 LE16_TO_CPU(acts[i].data.acl_act.value);
1385 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1386 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1388 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1390 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1399 * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
1400 * @hw: pointer to the hardware structure
1401 * @prof: pointer to flow profile
1403 * Disassociate the scenario to the Profile for the PF of the VSI.
1405 static enum ice_status
1406 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1408 struct ice_aqc_acl_prof_generic_frmt buf;
1409 enum ice_status status = ICE_SUCCESS;
1412 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1414 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1418 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1422 /* Clear scenario for this pf */
1423 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1424 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1430 * ice_flow_rem_entry_sync - Remove a flow entry
1431 * @hw: pointer to the HW struct
1432 * @blk: classification stage
1433 * @entry: flow entry to be removed
1435 static enum ice_status
1436 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1437 struct ice_flow_entry *entry)
1440 return ICE_ERR_BAD_PTR;
1442 if (blk == ICE_BLK_ACL) {
1443 enum ice_status status;
1446 return ICE_ERR_BAD_PTR;
1448 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1449 entry->scen_entry_idx);
1453 /* Checks if we need to release an ACL counter. */
1454 if (entry->acts_cnt && entry->acts)
1455 ice_flow_acl_free_act_cntr(hw, entry->acts,
1459 LIST_DEL(&entry->l_entry);
1461 ice_dealloc_flow_entry(hw, entry);
1467 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1468 * @hw: pointer to the HW struct
1469 * @blk: classification stage
1470 * @dir: flow direction
1471 * @prof_id: unique ID to identify this flow profile
1472 * @segs: array of one or more packet segments that describe the flow
1473 * @segs_cnt: number of packet segments provided
1474 * @acts: array of default actions
1475 * @acts_cnt: number of default actions
1476 * @prof: stores the returned flow profile added
1478 * Assumption: the caller has acquired the lock to the profile list
1480 static enum ice_status
1481 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1482 enum ice_flow_dir dir, u64 prof_id,
1483 struct ice_flow_seg_info *segs, u8 segs_cnt,
1484 struct ice_flow_action *acts, u8 acts_cnt,
1485 struct ice_flow_prof **prof)
1487 struct ice_flow_prof_params params;
1488 enum ice_status status;
1491 if (!prof || (acts_cnt && !acts))
1492 return ICE_ERR_BAD_PTR;
1494 ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
1495 params.prof = (struct ice_flow_prof *)
1496 ice_malloc(hw, sizeof(*params.prof));
1498 return ICE_ERR_NO_MEMORY;
1500 /* initialize extraction sequence to all invalid (0xff) */
1501 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1502 params.es[i].prot_id = ICE_PROT_INVALID;
1503 params.es[i].off = ICE_FV_OFFSET_INVAL;
1507 params.prof->id = prof_id;
1508 params.prof->dir = dir;
1509 params.prof->segs_cnt = segs_cnt;
1511 /* Make a copy of the segments that need to be persistent in the flow
1514 for (i = 0; i < segs_cnt; i++)
1515 ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs),
1516 ICE_NONDMA_TO_NONDMA);
1518 /* Make a copy of the actions that need to be persistent in the flow
1522 params.prof->acts = (struct ice_flow_action *)
1523 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1524 ICE_NONDMA_TO_NONDMA);
1526 if (!params.prof->acts) {
1527 status = ICE_ERR_NO_MEMORY;
1532 status = ice_flow_proc_segs(hw, ¶ms);
1534 ice_debug(hw, ICE_DBG_FLOW,
1535 "Error processing a flow's packet segments\n");
1539 /* Add a HW profile for this flow profile */
1540 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1541 params.attr, params.attr_cnt, params.es,
1544 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1548 INIT_LIST_HEAD(¶ms.prof->entries);
1549 ice_init_lock(¶ms.prof->entries_lock);
1550 *prof = params.prof;
1554 if (params.prof->acts)
1555 ice_free(hw, params.prof->acts);
1556 ice_free(hw, params.prof);
1563 * ice_flow_rem_prof_sync - remove a flow profile
1564 * @hw: pointer to the hardware structure
1565 * @blk: classification stage
1566 * @prof: pointer to flow profile to remove
1568 * Assumption: the caller has acquired the lock to the profile list
1570 static enum ice_status
1571 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1572 struct ice_flow_prof *prof)
1574 enum ice_status status;
1576 /* Remove all remaining flow entries before removing the flow profile */
1577 if (!LIST_EMPTY(&prof->entries)) {
1578 struct ice_flow_entry *e, *t;
1580 ice_acquire_lock(&prof->entries_lock);
1582 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1584 status = ice_flow_rem_entry_sync(hw, blk, e);
1589 ice_release_lock(&prof->entries_lock);
1592 if (blk == ICE_BLK_ACL) {
1593 struct ice_aqc_acl_profile_ranges query_rng_buf;
1594 struct ice_aqc_acl_prof_generic_frmt buf;
1597 /* Deassociate the scenario to the Profile for the PF */
1598 status = ice_flow_acl_disassoc_scen(hw, prof);
1602 /* Clear the range-checker if the profile ID is no longer
1605 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1606 if (status && status != ICE_ERR_IN_USE) {
1608 } else if (!status) {
1609 /* Clear the range-checker value for profile ID */
1610 ice_memset(&query_rng_buf, 0,
1611 sizeof(struct ice_aqc_acl_profile_ranges),
1614 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1619 status = ice_prog_acl_prof_ranges(hw, prof_id,
1620 &query_rng_buf, NULL);
1626 /* Remove all hardware profiles associated with this flow profile */
1627 status = ice_rem_prof(hw, blk, prof->id);
1629 LIST_DEL(&prof->l_entry);
1630 ice_destroy_lock(&prof->entries_lock);
1632 ice_free(hw, prof->acts);
1640 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1641 * @buf: Destination buffer function writes partial xtrct sequence to
1642 * @info: Info about field
1645 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1646 struct ice_flow_fld_info *info)
1651 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1652 info->xtrct.disp / BITS_PER_BYTE;
1653 dst = info->entry.val;
1654 for (i = 0; i < info->entry.last; i++)
1655 /* HW stores field vector words in LE, convert words back to BE
1656 * so constructed entries will end up in network order
1658 buf->byte_selection[dst++] = src++ ^ 1;
1662 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1663 * @hw: pointer to the hardware structure
1664 * @prof: pointer to flow profile
1666 static enum ice_status
1667 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1669 struct ice_aqc_acl_prof_generic_frmt buf;
1670 struct ice_flow_fld_info *info;
1671 enum ice_status status;
1675 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1677 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1681 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1682 if (status && status != ICE_ERR_IN_USE)
1686 /* Program the profile dependent configuration. This is done
1687 * only once regardless of the number of PFs using that profile
1689 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1691 for (i = 0; i < prof->segs_cnt; i++) {
1692 struct ice_flow_seg_info *seg = &prof->segs[i];
1693 u64 match = seg->match;
1696 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1697 const u64 bit = BIT_ULL(j);
1702 info = &seg->fields[j];
1704 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1705 buf.word_selection[info->entry.val] =
1708 ice_flow_acl_set_xtrct_seq_fld(&buf,
1714 for (j = 0; j < seg->raws_cnt; j++) {
1715 info = &seg->raws[j].info;
1716 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
1720 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1721 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1725 /* Update the current PF */
1726 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1727 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1733 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1734 * @hw: pointer to the hardware structure
1735 * @blk: classification stage
1736 * @vsi_handle: software VSI handle
1737 * @vsig: target VSI group
1739 * Assumption: the caller has already verified that the VSI to
1740 * be added has the same characteristics as the VSIG and will
1741 * thereby have access to all resources added to that VSIG.
1744 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1747 enum ice_status status;
1749 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1750 return ICE_ERR_PARAM;
1752 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1753 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1755 ice_release_lock(&hw->fl_profs_locks[blk]);
1761 * ice_flow_assoc_prof - associate a VSI with a flow profile
1762 * @hw: pointer to the hardware structure
1763 * @blk: classification stage
1764 * @prof: pointer to flow profile
1765 * @vsi_handle: software VSI handle
1767 * Assumption: the caller has acquired the lock to the profile list
1768 * and the software VSI handle has been validated
1770 static enum ice_status
1771 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1772 struct ice_flow_prof *prof, u16 vsi_handle)
1774 enum ice_status status = ICE_SUCCESS;
1776 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1777 if (blk == ICE_BLK_ACL) {
1778 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1782 status = ice_add_prof_id_flow(hw, blk,
1783 ice_get_hw_vsi_num(hw,
1787 ice_set_bit(vsi_handle, prof->vsis);
1789 ice_debug(hw, ICE_DBG_FLOW,
1790 "HW profile add failed, %d\n",
1798 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1799 * @hw: pointer to the hardware structure
1800 * @blk: classification stage
1801 * @prof: pointer to flow profile
1802 * @vsi_handle: software VSI handle
1804 * Assumption: the caller has acquired the lock to the profile list
1805 * and the software VSI handle has been validated
1807 static enum ice_status
1808 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1809 struct ice_flow_prof *prof, u16 vsi_handle)
1811 enum ice_status status = ICE_SUCCESS;
1813 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1814 status = ice_rem_prof_id_flow(hw, blk,
1815 ice_get_hw_vsi_num(hw,
1819 ice_clear_bit(vsi_handle, prof->vsis);
1821 ice_debug(hw, ICE_DBG_FLOW,
1822 "HW profile remove failed, %d\n",
1830 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1831 * @hw: pointer to the HW struct
1832 * @blk: classification stage
1833 * @dir: flow direction
1834 * @prof_id: unique ID to identify this flow profile
1835 * @segs: array of one or more packet segments that describe the flow
1836 * @segs_cnt: number of packet segments provided
1837 * @acts: array of default actions
1838 * @acts_cnt: number of default actions
1839 * @prof: stores the returned flow profile added
1842 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1843 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1844 struct ice_flow_action *acts, u8 acts_cnt,
1845 struct ice_flow_prof **prof)
1847 enum ice_status status;
1849 if (segs_cnt > ICE_FLOW_SEG_MAX)
1850 return ICE_ERR_MAX_LIMIT;
1853 return ICE_ERR_PARAM;
1856 return ICE_ERR_BAD_PTR;
1858 status = ice_flow_val_hdrs(segs, segs_cnt);
1862 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1864 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
1865 acts, acts_cnt, prof);
1867 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
1869 ice_release_lock(&hw->fl_profs_locks[blk]);
1875 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1876 * @hw: pointer to the HW struct
1877 * @blk: the block for which the flow profile is to be removed
1878 * @prof_id: unique ID of the flow profile to be removed
1881 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1883 struct ice_flow_prof *prof;
1884 enum ice_status status;
1886 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1888 prof = ice_flow_find_prof_id(hw, blk, prof_id);
1890 status = ICE_ERR_DOES_NOT_EXIST;
1894 /* prof becomes invalid after the call */
1895 status = ice_flow_rem_prof_sync(hw, blk, prof);
1898 ice_release_lock(&hw->fl_profs_locks[blk]);
1904 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1905 * @hw: pointer to the HW struct
1906 * @blk: classification stage
1907 * @prof_id: the profile ID handle
1908 * @hw_prof_id: pointer to variable to receive the HW profile ID
1911 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1914 struct ice_prof_map *map;
1916 map = ice_search_prof_id(hw, blk, prof_id);
1918 *hw_prof_id = map->prof_id;
1922 return ICE_ERR_DOES_NOT_EXIST;
1926 * ice_flow_find_entry - look for a flow entry using its unique ID
1927 * @hw: pointer to the HW struct
1928 * @blk: classification stage
1929 * @entry_id: unique ID to identify this flow entry
1931 * This function looks for the flow entry with the specified unique ID in all
1932 * flow profiles of the specified classification stage. If the entry is found,
1933 * and it returns the handle to the flow entry. Otherwise, it returns
1934 * ICE_FLOW_ENTRY_ID_INVAL.
1936 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
1938 struct ice_flow_entry *found = NULL;
1939 struct ice_flow_prof *p;
1941 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1943 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1944 struct ice_flow_entry *e;
1946 ice_acquire_lock(&p->entries_lock);
1947 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
1948 if (e->id == entry_id) {
1952 ice_release_lock(&p->entries_lock);
1958 ice_release_lock(&hw->fl_profs_locks[blk]);
1960 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
1964 * ice_flow_acl_check_actions - Checks the acl rule's actions
1965 * @hw: pointer to the hardware structure
1966 * @acts: array of actions to be performed on a match
1967 * @acts_cnt: number of actions
1968 * @cnt_alloc: indicates if a ACL counter has been allocated.
1970 static enum ice_status
1971 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
1972 u8 acts_cnt, bool *cnt_alloc)
1974 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
1977 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
1980 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
1981 return ICE_ERR_OUT_OF_RANGE;
1983 for (i = 0; i < acts_cnt; i++) {
1984 if (acts[i].type != ICE_FLOW_ACT_NOP &&
1985 acts[i].type != ICE_FLOW_ACT_DROP &&
1986 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
1987 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
1990 /* If the caller want to add two actions of the same type, then
1991 * it is considered invalid configuration.
1993 if (ice_test_and_set_bit(acts[i].type, dup_check))
1994 return ICE_ERR_PARAM;
1997 /* Checks if ACL counters are needed. */
1998 for (i = 0; i < acts_cnt; i++) {
1999 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2000 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2001 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2002 struct ice_acl_cntrs cntrs;
2003 enum ice_status status;
2006 cntrs.bank = 0; /* Only bank0 for the moment */
2008 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2009 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2011 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2013 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2016 /* Counter index within the bank */
2017 acts[i].data.acl_act.value =
2018 CPU_TO_LE16(cntrs.first_cntr);
2027 * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
2028 * @fld: number of the given field
2029 * @info: info about field
2030 * @range_buf: range checker configuration buffer
2031 * @data: pointer to a data buffer containing flow entry's match values/masks
2032 * @range: Input/output param indicating which range checkers are being used
2035 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2036 struct ice_aqc_acl_profile_ranges *range_buf,
2037 u8 *data, u8 *range)
2041 /* If not specified, default mask is all bits in field */
2042 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2043 BIT(ice_flds_info[fld].size) - 1 :
2044 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2046 /* If the mask is 0, then we don't need to worry about this input
2047 * range checker value.
2051 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2053 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2054 u8 range_idx = info->entry.val;
2056 range_buf->checker_cfg[range_idx].low_boundary =
2057 CPU_TO_BE16(new_low);
2058 range_buf->checker_cfg[range_idx].high_boundary =
2059 CPU_TO_BE16(new_high);
2060 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2062 /* Indicate which range checker is being used */
2063 *range |= BIT(range_idx);
2068 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
2069 * @fld: number of the given field
2070 * @info: info about the field
2071 * @buf: buffer containing the entry
2072 * @dontcare: buffer containing don't care mask for entry
2073 * @data: pointer to a data buffer containing flow entry's match values/masks
/* Copies one field's match bytes (and optional mask bytes) from the user
 * data buffer into the ACL key buffer, shifting by the extract displacement
 * and carrying overflow bits between adjacent destination bytes. Bits of
 * the destination bytes not covered by the field are marked don't-care. */
2076 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2077 u8 *dontcare, u8 *data)
2079 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2080 bool use_mask = false;
/* src/mask are byte offsets into @data; dst indexes the key buffer,
 * rebased past the byte-selection start index */
2083 src = info->src.val;
2084 mask = info->src.mask;
2085 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2086 disp = info->xtrct.disp % BITS_PER_BYTE;
2088 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2091 for (k = 0; k < info->entry.last; k++, dst++) {
2092 /* Add overflow bits from previous byte */
2093 buf[dst] = (tmp_s & 0xff00) >> 8;
2095 /* If mask is not valid, tmp_m is always zero, so just setting
2096 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2097 * overflow bits of mask from prev byte
2099 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2101 /* If there is displacement, last byte will only contain
2102 * displaced data, but there is no more data to read from user
2103 * buffer, so skip so as not to potentially read beyond end of
2106 if (!disp || k < info->entry.last - 1) {
2107 /* Store shifted data to use in next byte */
2108 tmp_s = data[src++] << disp;
2110 /* Add current (shifted) byte */
2111 buf[dst] |= tmp_s & 0xff;
2113 /* Handle mask if valid */
/* mask is inverted: dontcare bits are the complement of the match mask */
2115 tmp_m = (~data[mask++] & 0xff) << disp;
2116 dontcare[dst] |= tmp_m & 0xff;
2121 /* Fill in don't care bits at beginning of field */
2123 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2124 for (k = 0; k < disp; k++)
2125 dontcare[dst] |= BIT(k);
/* end_disp = bit position just past the field within its last byte */
2128 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2130 /* Fill in don't care bits at end of field */
2132 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2133 info->entry.last - 1;
2134 for (k = end_disp; k < BITS_PER_BYTE; k++)
2135 dontcare[dst] |= BIT(k);
2140 * ice_flow_acl_frmt_entry - Format acl entry
2141 * @hw: pointer to the hardware structure
2142 * @prof: pointer to flow profile
2143 * @e: pointer to the flow entry
2144 * @data: pointer to a data buffer containing flow entry's match values/masks
2145 * @acts: array of actions to be performed on a match
2146 * @acts_cnt: number of actions
2148 * Formats the key (and key_inverse) to be matched from the data passed in,
2149 * along with data from the flow profile. This key/key_inverse pair makes up
2150 * the 'entry' for an acl flow entry.
/* On success the entry owns: e->acts (copy of @acts), e->entry (the
 * key/key_inverse pair) and e->range_buf; error paths below free whatever
 * was allocated before the failure. */
2152 static enum ice_status
2153 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2154 struct ice_flow_entry *e, u8 *data,
2155 struct ice_flow_action *acts, u8 acts_cnt)
2157 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2158 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2159 enum ice_status status;
2164 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2168 /* Format the result action */
2170 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2174 status = ICE_ERR_NO_MEMORY;
/* Keep a private copy of the caller's action array on the entry */
2176 e->acts = (struct ice_flow_action *)
2177 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2178 ICE_NONDMA_TO_NONDMA);
2183 e->acts_cnt = acts_cnt;
2185 /* Format the matching data */
2186 buf_sz = prof->cfg.scen->width;
2187 buf = (u8 *)ice_malloc(hw, buf_sz);
2191 dontcare = (u8 *)ice_malloc(hw, buf_sz)
2195 /* 'key' buffer will store both key and key_inverse, so must be twice
2198 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2202 range_buf = (struct ice_aqc_acl_profile_ranges *)
2203 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2207 /* Set don't care mask to all 1's to start, will zero out used bytes */
2208 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM)
/* Walk every matched field of every segment and format it into the key,
 * dispatching range-checker fields separately from regular fields */
2210 for (i = 0; i < prof->segs_cnt; i++) {
2211 struct ice_flow_seg_info *seg = &prof->segs[i];
2212 u64 match = seg->match;
2215 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2216 struct ice_flow_fld_info *info;
2217 const u64 bit = BIT_ULL(j);
2222 info = &seg->fields[j];
2224 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2225 ice_flow_acl_frmt_entry_range(j, info,
2229 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) matches copy bytes straight through, no shifting */
2235 for (j = 0; j < seg->raws_cnt; j++) {
2236 struct ice_flow_fld_info *info = &seg->raws[j].info;
2237 u16 dst, src, mask, k;
2238 bool use_mask = false;
2240 src = info->src.val;
2241 dst = info->entry.val -
2242 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2243 mask = info->src.mask;
2245 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2248 for (k = 0; k < info->entry.last; k++, dst++) {
2249 buf[dst] = data[src++];
2251 dontcare[dst] = ~data[mask++];
/* Stamp the HW profile ID into the key so the scenario can tell entries
 * of different profiles apart */
2258 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2259 dontcare[prof->cfg.scen->pid_idx] = 0;
2261 /* Format the buffer for direction flags */
2262 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2264 if (prof->dir == ICE_FLOW_RX)
2265 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2268 buf[prof->cfg.scen->rng_chk_idx] = range;
2269 /* Mark any unused range checkers as don't care */
2270 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2271 e->range_buf = range_buf;
2273 ice_free(hw, range_buf);
/* Build the key/key_inverse pair from buf + dontcare; 'key' holds both
 * halves back to back, hence buf_sz * 2 */
2276 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2282 e->entry_sz = buf_sz * 2;
2289 ice_free(hw, dontcare);
/* Error unwind: release anything this function allocated */
2294 if (status && range_buf) {
2295 ice_free(hw, range_buf);
2296 e->range_buf = NULL;
2299 if (status && e->acts) {
2300 ice_free(hw, e->acts);
2305 if (status && cnt_alloc)
2306 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2312 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2313 * the compared data.
2314 * @prof: pointer to flow profile
2315 * @e: pointer to the comparing flow entry
2316 * @do_chg_action: decide if we want to change the ACL action
2317 * @do_add_entry: decide if we want to add the new ACL entry
2318 * @do_rem_entry: decide if we want to remove the current ACL entry
2320 * Find an ACL scenario entry that matches the compared data. At the same time,
2321 * this function also figures out:
2322 * a/ If we want to change the ACL action
2323 * b/ If we want to add the new ACL entry
2324 * c/ If we want to remove the current ACL entry
2326 static struct ice_flow_entry *
2327 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2328 struct ice_flow_entry *e, bool *do_chg_action,
2329 bool *do_add_entry, bool *do_rem_entry)
2331 struct ice_flow_entry *p, *return_entry = NULL;
2335 * a/ There exists an entry with same matching data, but different
2336 * priority, then we remove this existing ACL entry. Then, we
2337 * will add the new entry to the ACL scenario.
2338 * b/ There exists an entry with same matching data, priority, and
2339 * result action, then we do nothing
2340 * c/ There exists an entry with same matching data, priority, but
2341 * different action, then we only change the entry's action.
2342 * d/ Else, we add this new entry to the ACL scenario.
/* Defaults correspond to case d/ (no matching entry found) */
2344 *do_chg_action = false;
2345 *do_add_entry = true;
2346 *do_rem_entry = false;
2347 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* Compare the formatted key/key_inverse bytes for equality */
2348 if (memcmp(p->entry, e->entry, p->entry_sz))
2351 /* From this point, we have the same matching_data. */
2352 *do_add_entry = false;
2355 if (p->priority != e->priority) {
2356 /* matching data && !priority */
2357 *do_add_entry = true;
2358 *do_rem_entry = true;
2362 /* From this point, we will have matching_data && priority */
2363 if (p->acts_cnt != e->acts_cnt)
2364 *do_chg_action = true;
/* Quadratic action comparison; acts_cnt is a u8 so this stays small.
 * NOTE(review): only checks that each of p's actions appears somewhere
 * in e's list — order-insensitive comparison; confirm intended. */
2365 for (i = 0; i < p->acts_cnt; i++) {
2366 bool found_not_match = false;
2368 for (j = 0; j < e->acts_cnt; j++)
2369 if (memcmp(&p->acts[i], &e->acts[j],
2370 sizeof(struct ice_flow_action))) {
2371 found_not_match = true;
2375 if (found_not_match) {
2376 *do_chg_action = true;
2381 /* (do_chg_action = true) means :
2382 * matching_data && priority && !result_action
2383 * (do_chg_action = false) means :
2384 * matching_data && priority && result_action
2389 return return_entry;
2393 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
/* Maps the generic flow-priority enum onto the ACL block's own priority
 * enum; unknown values fall back to ICE_NORMAL. */
2396 static enum ice_acl_entry_prior
2397 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2399 enum ice_acl_entry_prior acl_prior;
2402 case ICE_FLOW_PRIO_LOW:
2403 acl_prior = ICE_LOW;
2405 case ICE_FLOW_PRIO_NORMAL:
2406 acl_prior = ICE_NORMAL;
2408 case ICE_FLOW_PRIO_HIGH:
2409 acl_prior = ICE_HIGH;
/* default: unrecognized priority degrades to normal */
2412 acl_prior = ICE_NORMAL;
2420 * ice_flow_acl_union_rng_chk - Perform union operation between two
2421 * range-range checker buffers
2422 * @dst_buf: pointer to destination range checker buffer
2423 * @src_buf: pointer to source range checker buffer
2425 * For this function, we do the union between dst_buf and src_buf
2426 * range checker buffer, and we will save the result back to dst_buf
/* Returns ICE_ERR_BAD_PTR on NULL input and ICE_ERR_MAX_LIMIT when a
 * source checker cannot be placed because every destination slot is
 * occupied by a different configuration. */
2428 static enum ice_status
2429 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2430 struct ice_aqc_acl_profile_ranges *src_buf)
2434 if (!dst_buf || !src_buf)
2435 return ICE_ERR_BAD_PTR;
2437 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2438 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2439 bool will_populate = false;
2441 in_data = &src_buf->checker_cfg[i];
/* Find a free slot (mask == 0) or an identical existing config to
 * merge into; cfg_data is left pointing at the chosen slot */
2446 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2447 cfg_data = &dst_buf->checker_cfg[j];
2449 if (!cfg_data->mask ||
2450 !memcmp(cfg_data, in_data,
2451 sizeof(struct ice_acl_rng_data))) {
2452 will_populate = true;
2457 if (will_populate) {
2458 ice_memcpy(cfg_data, in_data,
2459 sizeof(struct ice_acl_rng_data),
2460 ICE_NONDMA_TO_NONDMA);
2462 /* No available slot left to program range checker */
2463 return ICE_ERR_MAX_LIMIT;
2471 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2472 * @hw: pointer to the hardware structure
2473 * @prof: pointer to flow profile
2474 * @entry: double pointer to the flow entry
2476 * For this function, we will look at the current added entries in the
2477 * corresponding ACL scenario. Then, we will perform matching logic to
2478 * see if we want to add/modify/do nothing with this new entry.
/* May replace *@entry with an already-existing equivalent entry (the new
 * one is then deallocated), so callers must use the pointer written back
 * through @entry. Assumes prof->entries_lock is held by the caller. */
2480 static enum ice_status
2481 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2482 struct ice_flow_entry **entry)
2484 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2485 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2486 struct ice_acl_act_entry *acts = NULL;
2487 struct ice_flow_entry *exist;
2488 enum ice_status status = ICE_SUCCESS;
2489 struct ice_flow_entry *e;
2492 if (!entry || !(*entry) || !prof)
2493 return ICE_ERR_BAD_PTR;
2497 do_chg_rng_chk = false;
2501 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2506 /* Query the current range-checker value in FW */
2507 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2511 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2512 sizeof(struct ice_aqc_acl_profile_ranges),
2513 ICE_NONDMA_TO_NONDMA);
2515 /* Generate the new range-checker value */
2516 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2520 /* Reconfigure the range check if the buffer is changed. */
2521 do_chg_rng_chk = false;
2522 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2523 sizeof(struct ice_aqc_acl_profile_ranges))) {
2524 status = ice_prog_acl_prof_ranges(hw, prof_id,
2525 &cfg_rng_buf, NULL);
2529 do_chg_rng_chk = true;
2533 /* Figure out if we want to (change the ACL action) and/or
2534 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2536 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2537 &do_add_entry, &do_rem_entry);
2540 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2545 /* Prepare the result action buffer */
/* NOTE(review): the element count passed to ice_calloc is e->entry_sz
 * (the key size) while only e->acts_cnt elements are populated below —
 * looks like an over-allocation; confirm whether acts_cnt was intended. */
2546 acts = (struct ice_acl_act_entry *)ice_calloc
2547 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2548 for (i = 0; i < e->acts_cnt; i++)
2549 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2550 sizeof(struct ice_acl_act_entry),
2551 ICE_NONDMA_TO_NONDMA);
2554 enum ice_acl_entry_prior prior;
/* The entry buffer stores key then key_inverse back to back */
2558 keys = (u8 *)e->entry;
2559 inverts = keys + (e->entry_sz / 2);
2560 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2562 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2563 inverts, acts, e->acts_cnt,
2568 e->scen_entry_idx = entry_idx;
2569 LIST_ADD(&e->l_entry, &prof->entries);
2571 if (do_chg_action) {
2572 /* For the action memory info, update the SW's copy of
2573 * exist entry with e's action memory info
2575 ice_free(hw, exist->acts);
2576 exist->acts_cnt = e->acts_cnt;
2577 exist->acts = (struct ice_flow_action *)
2578 ice_calloc(hw, exist->acts_cnt,
2579 sizeof(struct ice_flow_action));
2582 status = ICE_ERR_NO_MEMORY;
2586 ice_memcpy(exist->acts, e->acts,
2587 sizeof(struct ice_flow_action) * e->acts_cnt,
2588 ICE_NONDMA_TO_NONDMA);
/* Reprogram the HW action memory of the existing scenario slot */
2590 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2592 exist->scen_entry_idx);
2597 if (do_chg_rng_chk) {
2598 /* In this case, we want to update the range checker
2599 * information of the exist entry
2601 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2607 /* As we don't add the new entry to our SW DB, deallocate its
2608 * memories, and return the exist entry to the caller
2610 ice_dealloc_flow_entry(hw, e);
2621 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2622 * @hw: pointer to the hardware structure
2623 * @prof: pointer to flow profile
2624 * @e: double pointer to the flow entry
/* Thin locking wrapper: serializes scenario-entry insertion on the
 * profile's entries_lock around the _sync worker. */
2626 static enum ice_status
2627 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2628 struct ice_flow_entry **e)
2630 enum ice_status status;
2632 ice_acquire_lock(&prof->entries_lock);
2633 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2634 ice_release_lock(&prof->entries_lock);
2640 * ice_flow_add_entry - Add a flow entry
2641 * @hw: pointer to the HW struct
2642 * @blk: classification stage
2643 * @prof_id: ID of the profile to add a new flow entry to
2644 * @entry_id: unique ID to identify this flow entry
2645 * @vsi_handle: software VSI handle for the flow entry
2646 * @prio: priority of the flow entry
2647 * @data: pointer to a data buffer containing flow entry's match values/masks
2648 * @acts: arrays of actions to be performed on a match
2649 * @acts_cnt: number of actions
2650 * @entry_h: pointer to buffer that receives the new flow entry's handle
/* Public entry point: validates arguments, looks up the profile under the
 * block's profile lock, allocates and formats the entry per block type,
 * links non-ACL entries onto the profile, and returns a handle. */
2653 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2654 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2655 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2658 struct ice_flow_entry *e = NULL;
2659 struct ice_flow_prof *prof;
2660 enum ice_status status = ICE_SUCCESS;
2662 /* ACL entries must indicate an action */
2663 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2664 return ICE_ERR_PARAM;
2666 /* No flow entry data is expected for RSS */
2667 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2668 return ICE_ERR_BAD_PTR;
2670 if (!ice_is_vsi_valid(hw, vsi_handle))
2671 return ICE_ERR_PARAM;
2673 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2675 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2677 status = ICE_ERR_DOES_NOT_EXIST;
2679 /* Allocate memory for the entry being added and associate
2680 * the VSI to the found flow profile
2682 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2684 status = ICE_ERR_NO_MEMORY;
2686 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2689 ice_release_lock(&hw->fl_profs_locks[blk]);
2694 e->vsi_handle = vsi_handle;
2700 /* RSS will add only one entry per VSI per profile */
2703 /* ACL will handle the entry management */
2704 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2709 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Blocks other than RSS/ACL are not supported here */
2719 status = ICE_ERR_NOT_IMPL;
2723 if (blk != ICE_BLK_ACL) {
2724 /* ACL will handle the entry management */
2725 ice_acquire_lock(&prof->entries_lock);
2726 LIST_ADD(&e->l_entry, &prof->entries);
2727 ice_release_lock(&prof->entries_lock);
2730 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* error path: free the formatted key buffer of a failed entry */
2735 ice_free(hw, e->entry);
2743 * ice_flow_rem_entry - Remove a flow entry
2744 * @hw: pointer to the HW struct
2745 * @blk: classification stage
2746 * @entry_h: handle to the flow entry to be removed
/* Resolves the opaque handle back to the entry, then removes it under the
 * owning profile's entries_lock. */
2748 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2751 struct ice_flow_entry *entry;
2752 struct ice_flow_prof *prof;
2753 enum ice_status status = ICE_SUCCESS;
2755 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2756 return ICE_ERR_PARAM;
2758 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2760 /* Retain the pointer to the flow profile as the entry will be freed */
2764 ice_acquire_lock(&prof->entries_lock);
2765 status = ice_flow_rem_entry_sync(hw, blk, entry);
2766 ice_release_lock(&prof->entries_lock);
2773 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2774 * @seg: packet segment the field being set belongs to
2775 * @fld: field to be set
2776 * @field_type: type of the field
2777 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2778 * entry's input buffer
2779 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2781 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2782 * entry's input buffer
2784 * This helper function stores information of a field being matched, including
2785 * the type of the field and the locations of the value to match, the mask, and
2786 * the upper-bound value in the start of the input buffer for a flow entry.
2787 * This function should only be used for fixed-size data structures.
2789 * This function also opportunistically determines the protocol headers to be
2790 * present based on the fields being set. Some fields cannot be used alone to
2791 * determine the protocol headers present. Sometimes, fields for particular
2792 * protocol headers are not matched. In those cases, the protocol headers
2793 * must be explicitly set.
2796 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2797 enum ice_flow_fld_match_type field_type, u16 val_loc,
2798 u16 mask_loc, u16 last_loc)
2800 u64 bit = BIT_ULL(fld);
2803 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the field type and the input-buffer offsets for value/mask/last */
2806 seg->fields[fld].type = field_type;
2807 seg->fields[fld].src.val = val_loc;
2808 seg->fields[fld].src.mask = mask_loc;
2809 seg->fields[fld].src.last = last_loc;
/* Implicitly mark the field's owning protocol header as present */
2811 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2815 * ice_flow_set_fld - specifies locations of field from entry's input buffer
2816 * @seg: packet segment the field being set belongs to
2817 * @fld: field to be set
2818 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2819 * entry's input buffer
2820 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2822 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2823 * entry's input buffer
2824 * @range: indicate if field being matched is to be in a range
2826 * This function specifies the locations, in the form of byte offsets from the
2827 * start of the input buffer for a flow entry, from where the value to match,
2828 * the mask value, and upper value can be extracted. These locations are then
2829 * stored in the flow profile. When adding a flow entry associated with the
2830 * flow profile, these locations will be used to quickly extract the values and
2831 * create the content of a match entry. This function should only be used for
2832 * fixed-size data structures.
2835 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2836 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Convenience wrapper over ice_flow_set_fld_ext: maps the boolean @range
 * onto the RANGE/REG field-type enum */
2838 enum ice_flow_fld_match_type t = range ?
2839 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
2841 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
2845 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
2846 * @seg: packet segment the field being set belongs to
2847 * @fld: field to be set
2848 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2849 * entry's input buffer
2850 * @pref_loc: location of prefix value from entry's input buffer
2851 * @pref_sz: size of the location holding the prefix value
2853 * This function specifies the locations, in the form of byte offsets from the
2854 * start of the input buffer for a flow entry, from where the value to match
2855 * and the IPv4 prefix value can be extracted. These locations are then stored
2856 * in the flow profile. When adding flow entries to the associated flow profile,
2857 * these locations can be used to quickly extract the values to create the
2858 * content of a match entry. This function should only be used for fixed-size
2862 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2863 u16 val_loc, u16 pref_loc, u8 pref_sz)
2865 /* For this type of field, the "mask" location is for the prefix value's
2866 * location and the "last" location is for the size of the location of
/* i.e. mask_loc <- pref_loc, last_loc <- pref_sz (repurposed slots) */
2869 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
2870 pref_loc, (u16)pref_sz);
2874 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
2875 * @seg: packet segment the field being set belongs to
2876 * @off: offset of the raw field from the beginning of the segment in bytes
2877 * @len: length of the raw pattern to be matched
2878 * @val_loc: location of the value to match from entry's input buffer
2879 * @mask_loc: location of mask value from entry's input buffer
2881 * This function specifies the offset of the raw field to be match from the
2882 * beginning of the specified packet segment, and the locations, in the form of
2883 * byte offsets from the start of the input buffer for a flow entry, from where
2884 * the value to match and the mask value to be extracted. These locations are
2885 * then stored in the flow profile. When adding flow entries to the associated
2886 * flow profile, these locations can be used to quickly extract the values to
2887 * create the content of a match entry. This function should only be used for
2888 * fixed-size data structures.
2891 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
2892 u16 val_loc, u16 mask_loc)
/* Only record the raw field while there is table capacity; see comment
 * below about deliberate overflow handling */
2894 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
2895 seg->raws[seg->raws_cnt].off = off;
2896 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
2897 seg->raws[seg->raws_cnt].info.src.val = val_loc;
2898 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
2899 /* The "last" field is used to store the length of the field */
2900 seg->raws[seg->raws_cnt].info.src.last = len;
2903 /* Overflows of "raws" will be handled as an error condition later in
2904 * the flow when this information is processed.
2909 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
2910 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
2912 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
2913 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
2915 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
2916 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
2917 ICE_FLOW_SEG_HDR_SCTP)
2919 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
2920 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
2921 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
2922 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
2925 * ice_flow_set_rss_seg_info - setup packet segments for RSS
2926 * @segs: pointer to the flow field segment(s)
2927 * @hash_fields: fields to be hashed on for the segment(s)
2928 * @flow_hdr: protocol header fields within a packet segment
2930 * Helper function to extract fields from hash bitmap and use flow
2931 * header value to set flow field segment for further use in flow
2932 * profile entry or removal.
2934 static enum ice_status
2935 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
2938 u64 val = hash_fields;
/* Register each hashed field; RSS entries carry no per-entry match data,
 * hence all offsets are ICE_FLOW_FLD_OFF_INVAL */
2941 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
2942 u64 bit = BIT_ULL(i);
2945 ice_flow_set_fld(segs, (enum ice_flow_field)i,
2946 ICE_FLOW_FLD_OFF_INVAL,
2947 ICE_FLOW_FLD_OFF_INVAL,
2948 ICE_FLOW_FLD_OFF_INVAL, false);
2952 ICE_FLOW_SET_HDRS(segs, flow_hdr);
/* Reject header combinations outside the RSS-supported set */
2954 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
2955 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
2956 return ICE_ERR_PARAM;
/* At most one L3 and one L4 header may be selected (power-of-2 check) */
2958 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
2959 if (val && !ice_is_pow2(val))
2962 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
2963 if (val && !ice_is_pow2(val))
2970 * ice_rem_vsi_rss_list - remove VSI from RSS list
2971 * @hw: pointer to the hardware structure
2972 * @vsi_handle: software VSI handle
2974 * Remove the VSI from all RSS configurations in the list.
2976 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
2978 struct ice_rss_cfg *r, *tmp;
2980 if (LIST_EMPTY(&hw->rss_list_head))
2983 ice_acquire_lock(&hw->rss_locks);
2984 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
2985 ice_rss_cfg, l_entry) {
/* Clear this VSI's bit; once no VSI references the config, unlink it
 * from the list (and presumably free it — confirm on the elided lines) */
2986 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
2987 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
2988 LIST_DEL(&r->l_entry);
2992 ice_release_lock(&hw->rss_locks);
2996 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
2997 * @hw: pointer to the hardware structure
2998 * @vsi_handle: software VSI handle
3000 * This function will iterate through all flow profiles and disassociate
3001 * the VSI from that profile. If the flow profile has no VSIs it will
3004 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3006 const enum ice_block blk = ICE_BLK_RSS;
3007 struct ice_flow_prof *p, *t;
3008 enum ice_status status = ICE_SUCCESS;
3010 if (!ice_is_vsi_valid(hw, vsi_handle))
3011 return ICE_ERR_PARAM;
3013 if (LIST_EMPTY(&hw->fl_profs[blk]))
3016 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Safe iteration: ice_flow_rem_prof_sync may unlink the profile */
3017 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3019 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3020 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Drop profiles that no longer serve any VSI */
3024 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3025 status = ice_flow_rem_prof_sync(hw, blk, p);
3031 ice_release_lock(&hw->fl_profs_locks[blk]);
3037 * ice_rem_rss_list - remove RSS configuration from list
3038 * @hw: pointer to the hardware structure
3039 * @vsi_handle: software VSI handle
3040 * @prof: pointer to flow profile
3042 * Assumption: lock has already been acquired for RSS list
3045 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3047 struct ice_rss_cfg *r, *tmp;
3049 /* Search for RSS hash fields associated to the VSI that match the
3050 * hash configurations associated to the flow profile. If found
3051 * remove from the RSS entry list of the VSI context and delete entry.
3053 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3054 ice_rss_cfg, l_entry) {
/* Match on the profile's outermost segment: same hashed fields and
 * same packet-header bitmap */
3055 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3056 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3057 ice_clear_bit(vsi_handle, r->vsis);
3058 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3059 LIST_DEL(&r->l_entry);
3068 * ice_add_rss_list - add RSS configuration to list
3069 * @hw: pointer to the hardware structure
3070 * @vsi_handle: software VSI handle
3071 * @prof: pointer to flow profile
3073 * Assumption: lock has already been acquired for RSS list
3075 static enum ice_status
3076 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3078 struct ice_rss_cfg *r, *rss_cfg;
/* If an identical configuration already exists, just tag this VSI on it */
3080 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3081 ice_rss_cfg, l_entry)
3082 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3083 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3084 ice_set_bit(vsi_handle, r->vsis);
/* Otherwise create a new list node mirroring the profile's last segment */
3088 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3090 return ICE_ERR_NO_MEMORY;
3092 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3093 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3094 rss_cfg->symm = prof->cfg.symm;
3095 ice_set_bit(vsi_handle, rss_cfg->vsis);
3097 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3102 #define ICE_FLOW_PROF_HASH_S 0
3103 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3104 #define ICE_FLOW_PROF_HDR_S 32
3105 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3106 #define ICE_FLOW_PROF_ENCAP_S 63
3107 #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3109 #define ICE_RSS_OUTER_HEADERS 1
3110 #define ICE_RSS_INNER_HEADERS 2
3112 /* Flow profile ID format:
3113 * [0:31] - Packet match fields
3114 * [32:62] - Protocol header
3115 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3117 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3118 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3119 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3120 ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
/* Program one byte-lane of a GLQF_HSYMM register: makes extracted word
 * @src symmetric with word @dst for HW profile @prof_id (bit 0x80 enables
 * the symmetry for that lane). Read-modify-write of the 8-bit lane. */
ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3125 u32 s = ((src % 4) << 3); /* byte shift */
3126 u32 v = dst | 0x80; /* value to program */
3127 u8 i = src / 4; /* register index */
3130 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3131 reg = (reg & ~(0xff << s)) | (v << s);
3132 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Cross-link @len consecutive field-vector words starting at @src and
 * @dst in both directions so the HW hashes src/dst symmetrically. */
ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3139 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3142 for (i = 0; i < len; i++) {
3143 ice_rss_config_xor_word(hw, prof_id,
3144 /* Yes, field vector in GLQF_HSYMM and
3145 * GLQF_HINSET is inversed!
3147 fv_last_word - (src + i),
3148 fv_last_word - (dst + i));
/* and the reverse direction, dst -> src */
3149 ice_rss_config_xor_word(hw, prof_id,
3150 fv_last_word - (dst + i),
3151 fv_last_word - (src + i));
/* Reprogram the GLQF_HSYMM registers for @prof: clear all symmetry lanes,
 * then, if the profile requests symmetric hashing, pair up src/dst
 * extract indices per protocol (IPv4/IPv6 addresses, TCP/UDP/SCTP ports).
 * NOTE(review): 'map' is dereferenced below with no visible NULL check
 * after ice_search_prof_id() — confirm the lookup cannot fail here. */
ice_rss_update_symm(struct ice_hw *hw,
3157 struct ice_flow_prof *prof)
3159 struct ice_prof_map *map;
3162 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3163 prof_id = map->prof_id;
3165 /* clear to default */
3166 for (m = 0; m < 6; m++)
3167 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3168 if (prof->cfg.symm) {
/* Symmetry is configured from the outermost (last) segment only */
3169 struct ice_flow_seg_info *seg =
3170 &prof->segs[prof->segs_cnt - 1];
3172 struct ice_flow_seg_xtrct *ipv4_src =
3173 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3174 struct ice_flow_seg_xtrct *ipv4_dst =
3175 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3176 struct ice_flow_seg_xtrct *ipv6_src =
3177 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3178 struct ice_flow_seg_xtrct *ipv6_dst =
3179 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3181 struct ice_flow_seg_xtrct *tcp_src =
3182 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3183 struct ice_flow_seg_xtrct *tcp_dst =
3184 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3186 struct ice_flow_seg_xtrct *udp_src =
3187 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3188 struct ice_flow_seg_xtrct *udp_dst =
3189 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3191 struct ice_flow_seg_xtrct *sctp_src =
3192 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3193 struct ice_flow_seg_xtrct *sctp_dst =
3194 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* prot_id != 0 means the field was actually extracted; lengths are in
 * field-vector words: 2 for IPv4 addrs, 8 for IPv6 addrs, 1 for ports */
3197 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3198 ice_rss_config_xor(hw, prof_id,
3199 ipv4_src->idx, ipv4_dst->idx, 2);
3202 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3203 ice_rss_config_xor(hw, prof_id,
3204 ipv6_src->idx, ipv6_dst->idx, 8);
3207 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3208 ice_rss_config_xor(hw, prof_id,
3209 tcp_src->idx, tcp_dst->idx, 1);
3212 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3213 ice_rss_config_xor(hw, prof_id,
3214 udp_src->idx, udp_dst->idx, 1);
3217 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3218 ice_rss_config_xor(hw, prof_id,
3219 sctp_src->idx, sctp_dst->idx, 1);
3224 * ice_add_rss_cfg_sync - add an RSS configuration
3225 * @hw: pointer to the hardware structure
3226 * @vsi_handle: software VSI handle
3227 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3228 * @addl_hdrs: protocol header fields
3229 * @segs_cnt: packet segment count
3230 * @symm: symmetric hash enable/disable
3232 * Assumption: lock has already been acquired for RSS list
3234 static enum ice_status
3235 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3236 u32 addl_hdrs, u8 segs_cnt, bool symm)
3238 const enum ice_block blk = ICE_BLK_RSS;
3239 struct ice_flow_prof *prof = NULL;
3240 struct ice_flow_seg_info *segs;
3241 enum ice_status status;
3243 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3244 return ICE_ERR_PARAM;
3246 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3249 return ICE_ERR_NO_MEMORY;
3251 /* Construct the packet segment info from the hashed fields */
3252 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3257 /* Search for a flow profile that has matching headers, hash fields
3258 * and has the input VSI associated to it. If found, no further
3259 * operations required and exit.
3261 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3263 ICE_FLOW_FIND_PROF_CHK_FLDS |
3264 ICE_FLOW_FIND_PROF_CHK_VSI);
3266 if (prof->cfg.symm == symm)
3268 prof->cfg.symm = symm;
3272 /* Check if a flow profile exists with the same protocol headers and
3273 * associated with the input VSI. If so disasscociate the VSI from
3274 * this profile. The VSI will be added to a new profile created with
3275 * the protocol header and new hash field configuration.
3277 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3278 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3280 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3282 ice_rem_rss_list(hw, vsi_handle, prof);
3286 /* Remove profile if it has no VSIs associated */
3287 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3288 status = ice_flow_rem_prof(hw, blk, prof->id);
3294 /* Search for a profile that has same match fields only. If this
3295 * exists then associate the VSI to this profile.
3297 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3299 ICE_FLOW_FIND_PROF_CHK_FLDS);
3301 if (prof->cfg.symm == symm) {
3302 status = ice_flow_assoc_prof(hw, blk, prof,
3305 status = ice_add_rss_list(hw, vsi_handle,
3308 /* if a profile exist but with different symmetric
3309 * requirement, just return error.
3311 status = ICE_ERR_NOT_SUPPORTED;
3316 /* Create a new flow profile with generated profile and packet
3317 * segment information.
3319 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3320 ICE_FLOW_GEN_PROFID(hashed_flds,
3321 segs[segs_cnt - 1].hdrs,
3323 segs, segs_cnt, NULL, 0, &prof);
3327 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3328 /* If association to a new flow profile failed then this profile can
3332 ice_flow_rem_prof(hw, blk, prof->id);
3336 status = ice_add_rss_list(hw, vsi_handle, prof);
3338 prof->cfg.symm = symm;
3341 ice_rss_update_symm(hw, prof);
3349 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3350 * @hw: pointer to the hardware structure
3351 * @vsi_handle: software VSI handle
3352 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3353 * @addl_hdrs: protocol header fields
3354 * @symm: symmetric hash enable/disable
3356 * This function will generate a flow profile based on fields associated with
3357 * the input fields to hash on, the flow type and use the VSI number to add
3358 * a flow entry to the profile.
3361 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3362 u32 addl_hdrs, bool symm)
3364 enum ice_status status;
3366 if (hashed_flds == ICE_HASH_INVALID ||
3367 !ice_is_vsi_valid(hw, vsi_handle))
3368 return ICE_ERR_PARAM;
3370 ice_acquire_lock(&hw->rss_locks);
3371 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3372 ICE_RSS_OUTER_HEADERS, symm);
3374 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3375 addl_hdrs, ICE_RSS_INNER_HEADERS,
3377 ice_release_lock(&hw->rss_locks);
3383 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3384 * @hw: pointer to the hardware structure
3385 * @vsi_handle: software VSI handle
3386 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3387 * @addl_hdrs: Protocol header fields within a packet segment
3388 * @segs_cnt: packet segment count
3390 * Assumption: lock has already been acquired for RSS list
3392 static enum ice_status
3393 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3394 u32 addl_hdrs, u8 segs_cnt)
3396 const enum ice_block blk = ICE_BLK_RSS;
3397 struct ice_flow_seg_info *segs;
3398 struct ice_flow_prof *prof;
3399 enum ice_status status;
3401 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3404 return ICE_ERR_NO_MEMORY;
3406 /* Construct the packet segment info from the hashed fields */
3407 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3412 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3414 ICE_FLOW_FIND_PROF_CHK_FLDS);
3416 status = ICE_ERR_DOES_NOT_EXIST;
3420 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3424 /* Remove RSS configuration from VSI context before deleting
3427 ice_rem_rss_list(hw, vsi_handle, prof);
3429 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3430 status = ice_flow_rem_prof(hw, blk, prof->id);
3438 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3439 * @hw: pointer to the hardware structure
3440 * @vsi_handle: software VSI handle
3441 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3442 * @addl_hdrs: Protocol header fields within a packet segment
3444 * This function will lookup the flow profile based on the input
3445 * hash field bitmap, iterate through the profile entry list of
3446 * that profile and find entry associated with input VSI to be
3447 * removed. Calls are made to underlying flow apis which will in
3448 * turn build or update buffers for RSS XLT1 section.
3451 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3454 enum ice_status status;
3456 if (hashed_flds == ICE_HASH_INVALID ||
3457 !ice_is_vsi_valid(hw, vsi_handle))
3458 return ICE_ERR_PARAM;
3460 ice_acquire_lock(&hw->rss_locks);
3461 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3462 ICE_RSS_OUTER_HEADERS);
3464 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3465 addl_hdrs, ICE_RSS_INNER_HEADERS);
3466 ice_release_lock(&hw->rss_locks);
3472 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3473 * @hw: pointer to the hardware structure
3474 * @vsi_handle: software VSI handle
3476 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3478 enum ice_status status = ICE_SUCCESS;
3479 struct ice_rss_cfg *r;
3481 if (!ice_is_vsi_valid(hw, vsi_handle))
3482 return ICE_ERR_PARAM;
3484 ice_acquire_lock(&hw->rss_locks);
3485 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3486 ice_rss_cfg, l_entry) {
3487 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3488 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3491 ICE_RSS_OUTER_HEADERS,
3495 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3498 ICE_RSS_INNER_HEADERS,
3504 ice_release_lock(&hw->rss_locks);
3510 * ice_get_rss_cfg - returns hashed fields for the given header types
3511 * @hw: pointer to the hardware structure
3512 * @vsi_handle: software VSI handle
3513 * @hdrs: protocol header type
3515 * This function will return the match fields of the first instance of flow
3516 * profile having the given header types and containing input VSI
3518 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3520 struct ice_rss_cfg *r, *rss_cfg = NULL;
3522 /* verify if the protocol header is non zero and VSI is valid */
3523 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3524 return ICE_HASH_INVALID;
3526 ice_acquire_lock(&hw->rss_locks);
3527 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3528 ice_rss_cfg, l_entry)
3529 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3530 r->packet_hdr == hdrs) {
3534 ice_release_lock(&hw->rss_locks);
3536 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;