/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019
 */

#include "ice_common.h"
/* Size of known protocol header fields, in bytes */
#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
#define ICE_FLOW_FLD_SZ_VLAN 2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
#define ICE_FLOW_FLD_SZ_IP_DSCP 1
#define ICE_FLOW_FLD_SZ_IP_TTL 1
#define ICE_FLOW_FLD_SZ_IP_PROT 1
#define ICE_FLOW_FLD_SZ_PORT 2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
#define ICE_FLOW_FLD_SZ_ARP_OPER 2
#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
#define ICE_FLOW_FLD_SZ_GTP_TEID 4
#define ICE_FLOW_FLD_SZ_GTP_QFI 2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
26 /* Describe properties of a protocol header field */
27 struct ice_flow_field_info {
28 enum ice_flow_seg_hdr hdr;
29 s16 off; /* Offset from start of a protocol header, in bits */
30 u16 size; /* Size of fields in bits */
31 u16 mask; /* 16-bit mask for field */
/* Initializer for an ice_flow_field_info entry with no extraction mask;
 * byte offsets/sizes are converted to bit units to match the struct.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}
/* Same as ICE_FLOW_FLD_INFO but additionally records a 16-bit field mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
48 /* Table containing properties of supported protocol header fields */
50 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
52 /* ICE_FLOW_FIELD_IDX_ETH_DA */
53 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
54 /* ICE_FLOW_FIELD_IDX_ETH_SA */
55 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
56 /* ICE_FLOW_FIELD_IDX_S_VLAN */
57 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
58 /* ICE_FLOW_FIELD_IDX_C_VLAN */
59 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
60 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
63 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
64 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
66 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
67 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
69 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
70 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
71 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
72 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
73 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
74 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
75 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
76 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
77 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
78 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
79 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
80 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
81 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
82 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
83 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
84 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
85 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
86 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
87 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
88 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
90 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
91 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
92 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
93 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
94 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
95 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
96 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
97 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
98 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
99 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
100 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
102 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
103 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
105 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
107 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
109 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
110 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
111 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
112 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
113 /* ICE_FLOW_FIELD_IDX_ARP_OP */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
116 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
118 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
121 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
122 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
124 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
126 ICE_FLOW_FLD_SZ_GTP_TEID),
127 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
128 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
129 ICE_FLOW_FLD_SZ_GTP_TEID),
130 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
131 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
132 ICE_FLOW_FLD_SZ_GTP_TEID),
133 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
134 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
135 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
136 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
138 ICE_FLOW_FLD_SZ_GTP_TEID),
139 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
141 ICE_FLOW_FLD_SZ_GTP_TEID),
143 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
144 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
145 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
148 /* Bitmaps indicating relevant packet types for a particular protocol header
150 * Packet types for packets with an Outer/First/Single MAC header
152 static const u32 ice_ptypes_mac_ofos[] = {
153 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
154 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
155 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
156 0x00000000, 0x00000000, 0x00000000, 0x00000000,
157 0x00000000, 0x00000000, 0x00000000, 0x00000000,
158 0x00000000, 0x00000000, 0x00000000, 0x00000000,
159 0x00000000, 0x00000000, 0x00000000, 0x00000000,
160 0x00000000, 0x00000000, 0x00000000, 0x00000000,
163 /* Packet types for packets with an Innermost/Last MAC VLAN header */
164 static const u32 ice_ptypes_macvlan_il[] = {
165 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
166 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
167 0x00000000, 0x00000000, 0x00000000, 0x00000000,
168 0x00000000, 0x00000000, 0x00000000, 0x00000000,
169 0x00000000, 0x00000000, 0x00000000, 0x00000000,
170 0x00000000, 0x00000000, 0x00000000, 0x00000000,
171 0x00000000, 0x00000000, 0x00000000, 0x00000000,
172 0x00000000, 0x00000000, 0x00000000, 0x00000000,
175 /* Packet types for packets with an Outer/First/Single IPv4 header */
176 static const u32 ice_ptypes_ipv4_ofos[] = {
177 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
178 0x00000000, 0x00000000, 0x00000000, 0x00000000,
179 0x0003000F, 0x000FC000, 0x03E0F800, 0x00000000,
180 0x00000000, 0x00000000, 0x00000000, 0x00000000,
181 0x00000000, 0x00000000, 0x00000000, 0x00000000,
182 0x00000000, 0x00000000, 0x00000000, 0x00000000,
183 0x00000000, 0x00000000, 0x00000000, 0x00000000,
184 0x00000000, 0x00000000, 0x00000000, 0x00000000,
187 /* Packet types for packets with an Innermost/Last IPv4 header */
188 static const u32 ice_ptypes_ipv4_il[] = {
189 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
190 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
191 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
192 0x00000000, 0x00000000, 0x00000000, 0x00000000,
193 0x00000000, 0x00000000, 0x00000000, 0x00000000,
194 0x00000000, 0x00000000, 0x00000000, 0x00000000,
195 0x00000000, 0x00000000, 0x00000000, 0x00000000,
196 0x00000000, 0x00000000, 0x00000000, 0x00000000,
199 /* Packet types for packets with an Outer/First/Single IPv6 header */
200 static const u32 ice_ptypes_ipv6_ofos[] = {
201 0x00000000, 0x00000000, 0x77000000, 0x10002000,
202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
203 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000000,
204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 0x00000000, 0x00000000, 0x00000000, 0x00000000,
208 0x00000000, 0x00000000, 0x00000000, 0x00000000,
211 /* Packet types for packets with an Innermost/Last IPv6 header */
212 static const u32 ice_ptypes_ipv6_il[] = {
213 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
214 0x00000770, 0x00000000, 0x00000000, 0x00000000,
215 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
220 0x00000000, 0x00000000, 0x00000000, 0x00000000,
223 /* Packet types for packets with an Outermost/First ARP header */
224 static const u32 ice_ptypes_arp_of[] = {
225 0x00000800, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 /* UDP Packet types for non-tunneled packets or tunneled
236 * packets with inner UDP.
238 static const u32 ice_ptypes_udp_il[] = {
239 0x81000000, 0x20204040, 0x04000010, 0x80810102,
240 0x00000040, 0x00000000, 0x00000000, 0x00000000,
241 0x00000000, 0x00410000, 0x10842000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 /* Packet types for packets with an Innermost/Last TCP header */
250 static const u32 ice_ptypes_tcp_il[] = {
251 0x04000000, 0x80810102, 0x10000040, 0x02040408,
252 0x00000102, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00820000, 0x21084000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
261 /* Packet types for packets with an Innermost/Last SCTP header */
262 static const u32 ice_ptypes_sctp_il[] = {
263 0x08000000, 0x01020204, 0x20000081, 0x04080810,
264 0x00000204, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x01040000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 /* Packet types for packets with an Outermost/First ICMP header */
274 static const u32 ice_ptypes_icmp_of[] = {
275 0x10000000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 /* Packet types for packets with an Innermost/Last ICMP header */
286 static const u32 ice_ptypes_icmp_il[] = {
287 0x00000000, 0x02040408, 0x40000102, 0x08101020,
288 0x00000408, 0x00000000, 0x00000000, 0x00000000,
289 0x00000000, 0x00000000, 0x42108000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
297 /* Packet types for packets with an Outermost/First GRE header */
298 static const u32 ice_ptypes_gre_of[] = {
299 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
300 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
309 /* Packet types for packets with an Innermost/Last MAC header */
310 static const u32 ice_ptypes_mac_il[] = {
311 0x00000000, 0x00000000, 0x00000000, 0x00000000,
312 0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 0x00000000, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x00000000, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
321 /* Packet types for GTPC */
322 static const u32 ice_ptypes_gtpc[] = {
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 0x00000000, 0x00000000, 0x00000180, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 /* Packet types for GTPC with TEID */
334 static const u32 ice_ptypes_gtpc_tid[] = {
335 0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000060, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 /* Packet types for GTPU */
346 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
347 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
348 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
349 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
350 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
351 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
352 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
353 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
354 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
355 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
356 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
357 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
358 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
359 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
360 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
361 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
362 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
363 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
364 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
365 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
366 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
369 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
370 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
371 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
372 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
373 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
374 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
375 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
376 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
377 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
378 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
379 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
380 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
381 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
382 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
383 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
384 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
385 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
386 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
387 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
388 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
389 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
392 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
393 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
394 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
395 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
396 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
397 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
398 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
399 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
400 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
401 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
402 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
403 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
404 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
405 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
406 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
407 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
408 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
409 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
410 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
411 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
412 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
415 static const u32 ice_ptypes_gtpu[] = {
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 0x00000000, 0x00000000, 0x00000000, 0x00000000,
418 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 0x00000000, 0x00000000, 0x00000000, 0x00000000,
422 0x00000000, 0x00000000, 0x00000000, 0x00000000,
423 0x00000000, 0x00000000, 0x00000000, 0x00000000,
426 /* Packet types for pppoe */
427 static const u32 ice_ptypes_pppoe[] = {
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
430 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
434 0x00000000, 0x00000000, 0x00000000, 0x00000000,
435 0x00000000, 0x00000000, 0x00000000, 0x00000000,
438 /* Manage parameters and info. used during the creation of a flow profile */
439 struct ice_flow_prof_params {
441 u16 entry_length; /* # of bytes formatted entry will require */
443 struct ice_flow_prof *prof;
445 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
446 * This will give us the direction flags.
448 struct ice_fv_word es[ICE_MAX_FV_WORDS];
449 /* attributes can be used to add attributes to a particular PTYPE */
450 const struct ice_ptype_attributes *attr;
453 u16 mask[ICE_MAX_FV_WORDS];
454 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Header groupings used to validate and size packet segments */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU)

#define ICE_FLOW_SEG_HDRS_L2_MASK \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
471 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
472 * @segs: array of one or more packet segments that describe the flow
473 * @segs_cnt: number of packet segments provided
475 static enum ice_status
476 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
480 for (i = 0; i < segs_cnt; i++) {
481 /* Multiple L3 headers */
482 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
483 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
484 return ICE_ERR_PARAM;
486 /* Multiple L4 headers */
487 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
488 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
489 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC 14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
#define ICE_FLOW_PROT_HDR_SZ_ARP 28
#define ICE_FLOW_PROT_HDR_SZ_ICMP 8
#define ICE_FLOW_PROT_HDR_SZ_TCP 20
#define ICE_FLOW_PROT_HDR_SZ_UDP 8
#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
507 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
508 * @params: information about the flow to be processed
509 * @seg: index of packet segment whose header size is to be determined
511 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
516 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
517 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
520 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
521 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
522 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
523 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
524 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
525 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
526 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
527 /* A L3 header is required if L4 is specified */
531 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
532 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
533 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
534 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
535 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
536 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
537 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
538 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
544 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
545 * @params: information about the flow to be processed
547 * This function identifies the packet types associated with the protocol
548 * headers being present in packet segments of the specified flow profile.
550 static enum ice_status
551 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
553 struct ice_flow_prof *prof;
556 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
561 for (i = 0; i < params->prof->segs_cnt; i++) {
562 const ice_bitmap_t *src;
565 hdrs = prof->segs[i].hdrs;
567 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
568 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
569 (const ice_bitmap_t *)ice_ptypes_mac_il;
570 ice_and_bitmap(params->ptypes, params->ptypes, src,
574 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
575 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
576 ice_and_bitmap(params->ptypes, params->ptypes, src,
580 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
581 ice_and_bitmap(params->ptypes, params->ptypes,
582 (const ice_bitmap_t *)ice_ptypes_arp_of,
586 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
587 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
588 ice_and_bitmap(params->ptypes, params->ptypes, src,
592 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
593 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
594 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
595 ice_and_bitmap(params->ptypes, params->ptypes, src,
597 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
598 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
599 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
600 ice_and_bitmap(params->ptypes, params->ptypes, src,
604 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
605 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
606 (const ice_bitmap_t *)ice_ptypes_icmp_il;
607 ice_and_bitmap(params->ptypes, params->ptypes, src,
609 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
610 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
611 ice_and_bitmap(params->ptypes, params->ptypes, src,
613 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
614 ice_and_bitmap(params->ptypes, params->ptypes,
615 (const ice_bitmap_t *)ice_ptypes_tcp_il,
617 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
618 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
619 ice_and_bitmap(params->ptypes, params->ptypes, src,
621 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
623 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
624 ice_and_bitmap(params->ptypes, params->ptypes,
625 src, ICE_FLOW_PTYPE_MAX);
627 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
628 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
629 ice_and_bitmap(params->ptypes, params->ptypes,
630 src, ICE_FLOW_PTYPE_MAX);
631 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
632 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
633 ice_and_bitmap(params->ptypes, params->ptypes,
634 src, ICE_FLOW_PTYPE_MAX);
635 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
636 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
637 ice_and_bitmap(params->ptypes, params->ptypes,
638 src, ICE_FLOW_PTYPE_MAX);
640 /* Attributes for GTP packet with downlink */
641 params->attr = ice_attr_gtpu_down;
642 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
643 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
644 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
645 ice_and_bitmap(params->ptypes, params->ptypes,
646 src, ICE_FLOW_PTYPE_MAX);
648 /* Attributes for GTP packet with uplink */
649 params->attr = ice_attr_gtpu_up;
650 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
651 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
652 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
653 ice_and_bitmap(params->ptypes, params->ptypes,
654 src, ICE_FLOW_PTYPE_MAX);
656 /* Attributes for GTP packet with Extension Header */
657 params->attr = ice_attr_gtpu_eh;
658 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
659 } else if ((hdrs & ICE_FLOW_SEG_HDR_GTPU) ==
660 ICE_FLOW_SEG_HDR_GTPU) {
661 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
662 ice_and_bitmap(params->ptypes, params->ptypes,
663 src, ICE_FLOW_PTYPE_MAX);
671 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
672 * @hw: pointer to the HW struct
673 * @params: information about the flow to be processed
674 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
676 * This function will allocate an extraction sequence entries for a DWORD size
677 * chunk of the packet flags.
679 static enum ice_status
680 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
681 struct ice_flow_prof_params *params,
682 enum ice_flex_mdid_pkt_flags flags)
684 u8 fv_words = hw->blk[params->blk].es.fvw;
687 /* Make sure the number of extraction sequence entries required does not
688 * exceed the block's capacity.
690 if (params->es_cnt >= fv_words)
691 return ICE_ERR_MAX_LIMIT;
693 /* some blocks require a reversed field vector layout */
694 if (hw->blk[params->blk].es.reverse)
695 idx = fv_words - params->es_cnt - 1;
697 idx = params->es_cnt;
699 params->es[idx].prot_id = ICE_PROT_META_ID;
700 params->es[idx].off = flags;
707 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
708 * @hw: pointer to the HW struct
709 * @params: information about the flow to be processed
710 * @seg: packet segment index of the field to be extracted
711 * @fld: ID of field to be extracted
712 * @match: bitfield of all fields
714 * This function determines the protocol ID, offset, and size of the given
715 * field. It then allocates one or more extraction sequence entries for the
716 * given field, and fill the entries with protocol ID and offset information.
718 static enum ice_status
719 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
720 u8 seg, enum ice_flow_field fld, u64 match)
722 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
723 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
724 u8 fv_words = hw->blk[params->blk].es.fvw;
725 struct ice_flow_fld_info *flds;
726 u16 cnt, ese_bits, i;
732 flds = params->prof->segs[seg].fields;
735 case ICE_FLOW_FIELD_IDX_ETH_DA:
736 case ICE_FLOW_FIELD_IDX_ETH_SA:
737 case ICE_FLOW_FIELD_IDX_S_VLAN:
738 case ICE_FLOW_FIELD_IDX_C_VLAN:
739 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
741 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
742 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
744 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
745 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
747 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
748 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
750 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
751 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
752 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
754 /* TTL and PROT share the same extraction seq. entry.
755 * Each is considered a sibling to the other in terms of sharing
756 * the same extraction sequence entry.
758 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
759 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
760 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
761 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
763 /* If the sibling field is also included, that field's
764 * mask needs to be included.
766 if (match & BIT(sib))
767 sib_mask = ice_flds_info[sib].mask;
769 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
770 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
771 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
773 /* TTL and PROT share the same extraction seq. entry.
774 * Each is considered a sibling to the other in terms of sharing
775 * the same extraction sequence entry.
777 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
778 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
779 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
780 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
782 /* If the sibling field is also included, that field's
783 * mask needs to be included.
785 if (match & BIT(sib))
786 sib_mask = ice_flds_info[sib].mask;
788 case ICE_FLOW_FIELD_IDX_IPV4_SA:
789 case ICE_FLOW_FIELD_IDX_IPV4_DA:
790 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
792 case ICE_FLOW_FIELD_IDX_IPV6_SA:
793 case ICE_FLOW_FIELD_IDX_IPV6_DA:
794 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
796 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
797 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
798 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
799 prot_id = ICE_PROT_TCP_IL;
801 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
802 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
803 prot_id = ICE_PROT_UDP_IL_OR_S;
805 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
806 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
807 prot_id = ICE_PROT_SCTP_IL;
809 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
810 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
811 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
812 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
813 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
814 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
815 /* GTP is accessed through UDP OF protocol */
816 prot_id = ICE_PROT_UDP_OF;
818 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
819 prot_id = ICE_PROT_PPPOE;
821 case ICE_FLOW_FIELD_IDX_ARP_SIP:
822 case ICE_FLOW_FIELD_IDX_ARP_DIP:
823 case ICE_FLOW_FIELD_IDX_ARP_SHA:
824 case ICE_FLOW_FIELD_IDX_ARP_DHA:
825 case ICE_FLOW_FIELD_IDX_ARP_OP:
826 prot_id = ICE_PROT_ARP_OF;
828 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
829 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
830 /* ICMP type and code share the same extraction seq. entry */
831 prot_id = (params->prof->segs[seg].hdrs &
832 ICE_FLOW_SEG_HDR_IPV4) ?
833 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
834 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
835 ICE_FLOW_FIELD_IDX_ICMP_CODE :
836 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
838 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
839 prot_id = ICE_PROT_GRE_OF;
842 return ICE_ERR_NOT_IMPL;
845 /* Each extraction sequence entry is a word in size, and extracts a
846 * word-aligned offset from a protocol header.
848 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
850 flds[fld].xtrct.prot_id = prot_id;
851 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
852 ICE_FLOW_FV_EXTRACT_SZ;
853 flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
854 flds[fld].xtrct.idx = params->es_cnt;
855 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
857 /* Adjust the next field-entry index after accommodating the number of
858 * entries this field consumes
860 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
861 ice_flds_info[fld].size, ese_bits);
863 /* Fill in the extraction sequence entries needed for this field */
864 off = flds[fld].xtrct.off;
865 mask = flds[fld].xtrct.mask;
866 for (i = 0; i < cnt; i++) {
867 /* Only consume an extraction sequence entry if there is no
868 * sibling field associated with this field or the sibling entry
869 * already extracts the word shared with this field.
871 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
872 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
873 flds[sib].xtrct.off != off) {
876 /* Make sure the number of extraction sequence required
877 * does not exceed the block's capability
879 if (params->es_cnt >= fv_words)
880 return ICE_ERR_MAX_LIMIT;
882 /* some blocks require a reversed field vector layout */
883 if (hw->blk[params->blk].es.reverse)
884 idx = fv_words - params->es_cnt - 1;
886 idx = params->es_cnt;
888 params->es[idx].prot_id = prot_id;
889 params->es[idx].off = off;
890 params->mask[idx] = mask | sib_mask;
894 off += ICE_FLOW_FV_EXTRACT_SZ;
901 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
902 * @hw: pointer to the HW struct
903 * @params: information about the flow to be processed
904 * @seg: index of packet segment whose raw fields are to be be extracted
906 static enum ice_status
907 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
914 if (!params->prof->segs[seg].raws_cnt)
917 if (params->prof->segs[seg].raws_cnt >
918 ARRAY_SIZE(params->prof->segs[seg].raws))
919 return ICE_ERR_MAX_LIMIT;
921 /* Offsets within the segment headers are not supported */
922 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
924 return ICE_ERR_PARAM;
926 fv_words = hw->blk[params->blk].es.fvw;
928 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
929 struct ice_flow_seg_fld_raw *raw;
932 raw = ¶ms->prof->segs[seg].raws[i];
934 /* Storing extraction information */
935 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
936 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
937 ICE_FLOW_FV_EXTRACT_SZ;
938 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
940 raw->info.xtrct.idx = params->es_cnt;
942 /* Determine the number of field vector entries this raw field
945 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
946 (raw->info.src.last * BITS_PER_BYTE),
947 (ICE_FLOW_FV_EXTRACT_SZ *
949 off = raw->info.xtrct.off;
950 for (j = 0; j < cnt; j++) {
953 /* Make sure the number of extraction sequence required
954 * does not exceed the block's capability
956 if (params->es_cnt >= hw->blk[params->blk].es.count ||
957 params->es_cnt >= ICE_MAX_FV_WORDS)
958 return ICE_ERR_MAX_LIMIT;
960 /* some blocks require a reversed field vector layout */
961 if (hw->blk[params->blk].es.reverse)
962 idx = fv_words - params->es_cnt - 1;
964 idx = params->es_cnt;
966 params->es[idx].prot_id = raw->info.xtrct.prot_id;
967 params->es[idx].off = off;
969 off += ICE_FLOW_FV_EXTRACT_SZ;
977 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
978 * @hw: pointer to the HW struct
979 * @params: information about the flow to be processed
981 * This function iterates through all matched fields in the given segments, and
982 * creates an extraction sequence for the fields.
984 static enum ice_status
985 ice_flow_create_xtrct_seq(struct ice_hw *hw,
986 struct ice_flow_prof_params *params)
988 enum ice_status status = ICE_SUCCESS;
991 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
994 if (params->blk == ICE_BLK_ACL) {
995 status = ice_flow_xtract_pkt_flags(hw, params,
996 ICE_RX_MDID_PKT_FLAGS_15_0);
1001 for (i = 0; i < params->prof->segs_cnt; i++) {
1002 u64 match = params->prof->segs[i].match;
1003 enum ice_flow_field j;
1005 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1006 const u64 bit = BIT_ULL(j);
1009 status = ice_flow_xtract_fld(hw, params, i, j,
1017 /* Process raw matching bytes */
1018 status = ice_flow_xtract_raws(hw, params, i);
1027 * ice_flow_sel_acl_scen - returns the specific scenario
1028 * @hw: pointer to the hardware structure
1029 * @params: information about the flow to be processed
1031 * This function will return the specific scenario based on the
1032 * params passed to it
1034 static enum ice_status
1035 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1037 /* Find the best-fit scenario for the provided match width */
1038 struct ice_acl_scen *cand_scen = NULL, *scen;
1041 return ICE_ERR_DOES_NOT_EXIST;
1043 /* Loop through each scenario and match against the scenario width
1044 * to select the specific scenario
1046 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1047 if (scen->eff_width >= params->entry_length &&
1048 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1051 return ICE_ERR_DOES_NOT_EXIST;
1053 params->prof->cfg.scen = cand_scen;
1059 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1060 * @params: information about the flow to be processed
1062 static enum ice_status
1063 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1065 u16 index, i, range_idx = 0;
1067 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1069 for (i = 0; i < params->prof->segs_cnt; i++) {
1070 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1071 u64 match = seg->match;
1074 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1075 struct ice_flow_fld_info *fld;
1076 const u64 bit = BIT_ULL(j);
1081 fld = &seg->fields[j];
1082 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1084 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1085 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1087 /* Range checking only supported for single
1090 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1092 BITS_PER_BYTE * 2) > 1)
1093 return ICE_ERR_PARAM;
1095 /* Ranges must define low and high values */
1096 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1097 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1098 return ICE_ERR_PARAM;
1100 fld->entry.val = range_idx++;
1102 /* Store adjusted byte-length of field for later
1103 * use, taking into account potential
1104 * non-byte-aligned displacement
1106 fld->entry.last = DIVIDE_AND_ROUND_UP
1107 (ice_flds_info[j].size +
1108 (fld->xtrct.disp % BITS_PER_BYTE),
1110 fld->entry.val = index;
1111 index += fld->entry.last;
1117 for (j = 0; j < seg->raws_cnt; j++) {
1118 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1120 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1121 raw->info.entry.val = index;
1122 raw->info.entry.last = raw->info.src.last;
1123 index += raw->info.entry.last;
1127 /* Currently only support using the byte selection base, which only
1128 * allows for an effective entry size of 30 bytes. Reject anything
1131 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1132 return ICE_ERR_PARAM;
1134 /* Only 8 range checkers per profile, reject anything trying to use
1137 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1138 return ICE_ERR_PARAM;
1140 /* Store # bytes required for entry for later use */
1141 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1147 * ice_flow_proc_segs - process all packet segments associated with a profile
1148 * @hw: pointer to the HW struct
1149 * @params: information about the flow to be processed
1151 static enum ice_status
1152 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1154 enum ice_status status;
1156 status = ice_flow_proc_seg_hdrs(params);
1160 status = ice_flow_create_xtrct_seq(hw, params);
1164 switch (params->blk) {
1166 /* Only header information is provided for RSS configuration.
1167 * No further processing is needed.
1169 status = ICE_SUCCESS;
1172 status = ice_flow_acl_def_entry_frmt(params);
1175 status = ice_flow_sel_acl_scen(hw, params);
1180 status = ICE_SUCCESS;
1184 return ICE_ERR_NOT_IMPL;
1190 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1191 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1192 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1195 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1196 * @hw: pointer to the HW struct
1197 * @blk: classification stage
1198 * @dir: flow direction
1199 * @segs: array of one or more packet segments that describe the flow
1200 * @segs_cnt: number of packet segments provided
1201 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1202 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1204 static struct ice_flow_prof *
1205 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1206 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1207 u8 segs_cnt, u16 vsi_handle, u32 conds)
1209 struct ice_flow_prof *p, *prof = NULL;
1211 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1212 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1213 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1214 segs_cnt && segs_cnt == p->segs_cnt) {
1217 /* Check for profile-VSI association if specified */
1218 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1219 ice_is_vsi_valid(hw, vsi_handle) &&
1220 !ice_is_bit_set(p->vsis, vsi_handle))
1223 /* Protocol headers must be checked. Matched fields are
1224 * checked if specified.
1226 for (i = 0; i < segs_cnt; i++)
1227 if (segs[i].hdrs != p->segs[i].hdrs ||
1228 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1229 segs[i].match != p->segs[i].match))
1232 /* A match is found if all segments are matched */
1233 if (i == segs_cnt) {
1239 ice_release_lock(&hw->fl_profs_locks[blk]);
1245 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1246 * @hw: pointer to the HW struct
1247 * @blk: classification stage
1248 * @dir: flow direction
1249 * @segs: array of one or more packet segments that describe the flow
1250 * @segs_cnt: number of packet segments provided
1253 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1254 struct ice_flow_seg_info *segs, u8 segs_cnt)
1256 struct ice_flow_prof *p;
1258 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1259 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1261 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1265 * ice_flow_find_prof_id - Look up a profile with given profile ID
1266 * @hw: pointer to the HW struct
1267 * @blk: classification stage
1268 * @prof_id: unique ID to identify this flow profile
1270 static struct ice_flow_prof *
1271 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1273 struct ice_flow_prof *p;
1275 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1276 if (p->id == prof_id)
1284 * ice_dealloc_flow_entry - Deallocate flow entry memory
1285 * @hw: pointer to the HW struct
1286 * @entry: flow entry to be removed
1289 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1295 ice_free(hw, entry->entry);
1297 if (entry->range_buf) {
1298 ice_free(hw, entry->range_buf);
1299 entry->range_buf = NULL;
1303 ice_free(hw, entry->acts);
1305 entry->acts_cnt = 0;
1308 ice_free(hw, entry);
1311 #define ICE_ACL_INVALID_SCEN 0x3f
1314 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1315 * @hw: pointer to the hardware structure
1316 * @prof: pointer to flow profile
1317 * @buf: destination buffer function writes partial xtrct sequence to
1319 * returns ICE_SUCCESS if no pf is associated to the given profile
1320 * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1321 * returns other error code for real error
1323 static enum ice_status
1324 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1325 struct ice_aqc_acl_prof_generic_frmt *buf)
1327 enum ice_status status;
1330 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1334 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1338 /* If all pf's associated scenarios are all 0 or all
1339 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1340 * not been configured yet.
1342 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1343 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1344 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1345 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1348 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1349 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1350 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1351 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1352 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1353 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1354 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1355 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1358 return ICE_ERR_IN_USE;
1362 * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1363 * @hw: pointer to the hardware structure
1364 * @acts: array of actions to be performed on a match
1365 * @acts_cnt: number of actions
1367 static enum ice_status
1368 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1373 for (i = 0; i < acts_cnt; i++) {
1374 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1375 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1376 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1377 struct ice_acl_cntrs cntrs;
1378 enum ice_status status;
1380 cntrs.bank = 0; /* Only bank0 for the moment */
1382 LE16_TO_CPU(acts[i].data.acl_act.value);
1384 LE16_TO_CPU(acts[i].data.acl_act.value);
1386 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1387 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1389 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1391 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1400 * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
1401 * @hw: pointer to the hardware structure
1402 * @prof: pointer to flow profile
1404 * Disassociate the scenario to the Profile for the PF of the VSI.
1406 static enum ice_status
1407 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1409 struct ice_aqc_acl_prof_generic_frmt buf;
1410 enum ice_status status = ICE_SUCCESS;
1413 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1415 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1419 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1423 /* Clear scenario for this pf */
1424 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1425 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1431 * ice_flow_rem_entry_sync - Remove a flow entry
1432 * @hw: pointer to the HW struct
1433 * @blk: classification stage
1434 * @entry: flow entry to be removed
1436 static enum ice_status
1437 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1438 struct ice_flow_entry *entry)
1441 return ICE_ERR_BAD_PTR;
1443 if (blk == ICE_BLK_ACL) {
1444 enum ice_status status;
1447 return ICE_ERR_BAD_PTR;
1449 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1450 entry->scen_entry_idx);
1454 /* Checks if we need to release an ACL counter. */
1455 if (entry->acts_cnt && entry->acts)
1456 ice_flow_acl_free_act_cntr(hw, entry->acts,
1460 LIST_DEL(&entry->l_entry);
1462 ice_dealloc_flow_entry(hw, entry);
1468 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1469 * @hw: pointer to the HW struct
1470 * @blk: classification stage
1471 * @dir: flow direction
1472 * @prof_id: unique ID to identify this flow profile
1473 * @segs: array of one or more packet segments that describe the flow
1474 * @segs_cnt: number of packet segments provided
1475 * @acts: array of default actions
1476 * @acts_cnt: number of default actions
1477 * @prof: stores the returned flow profile added
1479 * Assumption: the caller has acquired the lock to the profile list
1481 static enum ice_status
1482 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1483 enum ice_flow_dir dir, u64 prof_id,
1484 struct ice_flow_seg_info *segs, u8 segs_cnt,
1485 struct ice_flow_action *acts, u8 acts_cnt,
1486 struct ice_flow_prof **prof)
1488 struct ice_flow_prof_params params;
1489 enum ice_status status;
1492 if (!prof || (acts_cnt && !acts))
1493 return ICE_ERR_BAD_PTR;
1495 ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
1496 params.prof = (struct ice_flow_prof *)
1497 ice_malloc(hw, sizeof(*params.prof));
1499 return ICE_ERR_NO_MEMORY;
1501 /* initialize extraction sequence to all invalid (0xff) */
1502 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1503 params.es[i].prot_id = ICE_PROT_INVALID;
1504 params.es[i].off = ICE_FV_OFFSET_INVAL;
1508 params.prof->id = prof_id;
1509 params.prof->dir = dir;
1510 params.prof->segs_cnt = segs_cnt;
1512 /* Make a copy of the segments that need to be persistent in the flow
1515 for (i = 0; i < segs_cnt; i++)
1516 ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs),
1517 ICE_NONDMA_TO_NONDMA);
1519 /* Make a copy of the actions that need to be persistent in the flow
1523 params.prof->acts = (struct ice_flow_action *)
1524 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1525 ICE_NONDMA_TO_NONDMA);
1527 if (!params.prof->acts) {
1528 status = ICE_ERR_NO_MEMORY;
1533 status = ice_flow_proc_segs(hw, ¶ms);
1535 ice_debug(hw, ICE_DBG_FLOW,
1536 "Error processing a flow's packet segments\n");
1540 /* Add a HW profile for this flow profile */
1541 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1542 params.attr, params.attr_cnt, params.es,
1545 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1549 INIT_LIST_HEAD(¶ms.prof->entries);
1550 ice_init_lock(¶ms.prof->entries_lock);
1551 *prof = params.prof;
1555 if (params.prof->acts)
1556 ice_free(hw, params.prof->acts);
1557 ice_free(hw, params.prof);
1564 * ice_flow_rem_prof_sync - remove a flow profile
1565 * @hw: pointer to the hardware structure
1566 * @blk: classification stage
1567 * @prof: pointer to flow profile to remove
1569 * Assumption: the caller has acquired the lock to the profile list
1571 static enum ice_status
1572 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1573 struct ice_flow_prof *prof)
1575 enum ice_status status;
1577 /* Remove all remaining flow entries before removing the flow profile */
1578 if (!LIST_EMPTY(&prof->entries)) {
1579 struct ice_flow_entry *e, *t;
1581 ice_acquire_lock(&prof->entries_lock);
1583 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1585 status = ice_flow_rem_entry_sync(hw, blk, e);
1590 ice_release_lock(&prof->entries_lock);
1593 if (blk == ICE_BLK_ACL) {
1594 struct ice_aqc_acl_profile_ranges query_rng_buf;
1595 struct ice_aqc_acl_prof_generic_frmt buf;
1598 /* Deassociate the scenario to the Profile for the PF */
1599 status = ice_flow_acl_disassoc_scen(hw, prof);
1603 /* Clear the range-checker if the profile ID is no longer
1606 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1607 if (status && status != ICE_ERR_IN_USE) {
1609 } else if (!status) {
1610 /* Clear the range-checker value for profile ID */
1611 ice_memset(&query_rng_buf, 0,
1612 sizeof(struct ice_aqc_acl_profile_ranges),
1615 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1620 status = ice_prog_acl_prof_ranges(hw, prof_id,
1621 &query_rng_buf, NULL);
1627 /* Remove all hardware profiles associated with this flow profile */
1628 status = ice_rem_prof(hw, blk, prof->id);
1630 LIST_DEL(&prof->l_entry);
1631 ice_destroy_lock(&prof->entries_lock);
1633 ice_free(hw, prof->acts);
1641 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1642 * @buf: Destination buffer function writes partial xtrct sequence to
1643 * @info: Info about field
1646 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1647 struct ice_flow_fld_info *info)
1652 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1653 info->xtrct.disp / BITS_PER_BYTE;
1654 dst = info->entry.val;
1655 for (i = 0; i < info->entry.last; i++)
1656 /* HW stores field vector words in LE, convert words back to BE
1657 * so constructed entries will end up in network order
1659 buf->byte_selection[dst++] = src++ ^ 1;
1663 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1664 * @hw: pointer to the hardware structure
1665 * @prof: pointer to flow profile
1667 static enum ice_status
1668 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1670 struct ice_aqc_acl_prof_generic_frmt buf;
1671 struct ice_flow_fld_info *info;
1672 enum ice_status status;
1676 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1678 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1682 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1683 if (status && status != ICE_ERR_IN_USE)
1687 /* Program the profile dependent configuration. This is done
1688 * only once regardless of the number of PFs using that profile
1690 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1692 for (i = 0; i < prof->segs_cnt; i++) {
1693 struct ice_flow_seg_info *seg = &prof->segs[i];
1694 u64 match = seg->match;
1697 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1698 const u64 bit = BIT_ULL(j);
1703 info = &seg->fields[j];
1705 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1706 buf.word_selection[info->entry.val] =
1709 ice_flow_acl_set_xtrct_seq_fld(&buf,
1715 for (j = 0; j < seg->raws_cnt; j++) {
1716 info = &seg->raws[j].info;
1717 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
1721 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1722 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1726 /* Update the current PF */
1727 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1728 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1734 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1735 * @hw: pointer to the hardware structure
1736 * @blk: classification stage
1737 * @vsi_handle: software VSI handle
1738 * @vsig: target VSI group
1740 * Assumption: the caller has already verified that the VSI to
1741 * be added has the same characteristics as the VSIG and will
1742 * thereby have access to all resources added to that VSIG.
1745 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1748 enum ice_status status;
1750 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1751 return ICE_ERR_PARAM;
1753 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1754 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1756 ice_release_lock(&hw->fl_profs_locks[blk]);
1762 * ice_flow_assoc_prof - associate a VSI with a flow profile
1763 * @hw: pointer to the hardware structure
1764 * @blk: classification stage
1765 * @prof: pointer to flow profile
1766 * @vsi_handle: software VSI handle
1768 * Assumption: the caller has acquired the lock to the profile list
1769 * and the software VSI handle has been validated
1771 static enum ice_status
1772 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1773 struct ice_flow_prof *prof, u16 vsi_handle)
1775 enum ice_status status = ICE_SUCCESS;
1777 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1778 if (blk == ICE_BLK_ACL) {
1779 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1783 status = ice_add_prof_id_flow(hw, blk,
1784 ice_get_hw_vsi_num(hw,
1788 ice_set_bit(vsi_handle, prof->vsis);
1790 ice_debug(hw, ICE_DBG_FLOW,
1791 "HW profile add failed, %d\n",
1799 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1800 * @hw: pointer to the hardware structure
1801 * @blk: classification stage
1802 * @prof: pointer to flow profile
1803 * @vsi_handle: software VSI handle
1805 * Assumption: the caller has acquired the lock to the profile list
1806 * and the software VSI handle has been validated
1808 static enum ice_status
1809 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1810 struct ice_flow_prof *prof, u16 vsi_handle)
1812 enum ice_status status = ICE_SUCCESS;
1814 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1815 status = ice_rem_prof_id_flow(hw, blk,
1816 ice_get_hw_vsi_num(hw,
1820 ice_clear_bit(vsi_handle, prof->vsis);
1822 ice_debug(hw, ICE_DBG_FLOW,
1823 "HW profile remove failed, %d\n",
1831 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1832 * @hw: pointer to the HW struct
1833 * @blk: classification stage
1834 * @dir: flow direction
1835 * @prof_id: unique ID to identify this flow profile
1836 * @segs: array of one or more packet segments that describe the flow
1837 * @segs_cnt: number of packet segments provided
1838 * @acts: array of default actions
1839 * @acts_cnt: number of default actions
1840 * @prof: stores the returned flow profile added
1843 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1844 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1845 struct ice_flow_action *acts, u8 acts_cnt,
1846 struct ice_flow_prof **prof)
1848 enum ice_status status;
1850 if (segs_cnt > ICE_FLOW_SEG_MAX)
1851 return ICE_ERR_MAX_LIMIT;
1854 return ICE_ERR_PARAM;
1857 return ICE_ERR_BAD_PTR;
1859 status = ice_flow_val_hdrs(segs, segs_cnt);
1863 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1865 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
1866 acts, acts_cnt, prof);
1868 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
1870 ice_release_lock(&hw->fl_profs_locks[blk]);
1876 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1877 * @hw: pointer to the HW struct
1878 * @blk: the block for which the flow profile is to be removed
1879 * @prof_id: unique ID of the flow profile to be removed
1882 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1884 struct ice_flow_prof *prof;
1885 enum ice_status status;
1887 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1889 prof = ice_flow_find_prof_id(hw, blk, prof_id);
1891 status = ICE_ERR_DOES_NOT_EXIST;
1895 /* prof becomes invalid after the call */
1896 status = ice_flow_rem_prof_sync(hw, blk, prof);
1899 ice_release_lock(&hw->fl_profs_locks[blk]);
1905 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1906 * @hw: pointer to the HW struct
1907 * @blk: classification stage
1908 * @prof_id: the profile ID handle
1909 * @hw_prof_id: pointer to variable to receive the HW profile ID
1912 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1915 struct ice_prof_map *map;
1917 map = ice_search_prof_id(hw, blk, prof_id);
1919 *hw_prof_id = map->prof_id;
1923 return ICE_ERR_DOES_NOT_EXIST;
1927 * ice_flow_find_entry - look for a flow entry using its unique ID
1928 * @hw: pointer to the HW struct
1929 * @blk: classification stage
1930 * @entry_id: unique ID to identify this flow entry
1932 * This function looks for the flow entry with the specified unique ID in all
1933 * flow profiles of the specified classification stage. If the entry is found,
1934 * and it returns the handle to the flow entry. Otherwise, it returns
1935 * ICE_FLOW_ENTRY_ID_INVAL.
1937 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
1939 struct ice_flow_entry *found = NULL;
1940 struct ice_flow_prof *p;
1942 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1944 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1945 struct ice_flow_entry *e;
1947 ice_acquire_lock(&p->entries_lock);
1948 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
1949 if (e->id == entry_id) {
1953 ice_release_lock(&p->entries_lock);
1959 ice_release_lock(&hw->fl_profs_locks[blk]);
1961 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
1965 * ice_flow_acl_check_actions - Checks the acl rule's actions
1966 * @hw: pointer to the hardware structure
1967 * @acts: array of actions to be performed on a match
1968 * @acts_cnt: number of actions
1969 * @cnt_alloc: indicates if a ACL counter has been allocated.
1971 static enum ice_status
1972 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
1973 u8 acts_cnt, bool *cnt_alloc)
1975 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
1978 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
1981 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
1982 return ICE_ERR_OUT_OF_RANGE;
1984 for (i = 0; i < acts_cnt; i++) {
1985 if (acts[i].type != ICE_FLOW_ACT_NOP &&
1986 acts[i].type != ICE_FLOW_ACT_DROP &&
1987 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
1988 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
1991 /* If the caller want to add two actions of the same type, then
1992 * it is considered invalid configuration.
1994 if (ice_test_and_set_bit(acts[i].type, dup_check))
1995 return ICE_ERR_PARAM;
1998 /* Checks if ACL counters are needed. */
1999 for (i = 0; i < acts_cnt; i++) {
2000 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2001 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2002 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2003 struct ice_acl_cntrs cntrs;
2004 enum ice_status status;
2007 cntrs.bank = 0; /* Only bank0 for the moment */
2009 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2010 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2012 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2014 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2017 /* Counter index within the bank */
2018 acts[i].data.acl_act.value =
2019 CPU_TO_LE16(cntrs.first_cntr);
2028 * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
2029 * @fld: number of the given field
2030 * @info: info about field
2031 * @range_buf: range checker configuration buffer
2032 * @data: pointer to a data buffer containing flow entry's match values/masks
2033 * @range: Input/output param indicating which range checkers are being used
2036 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2037 struct ice_aqc_acl_profile_ranges *range_buf,
2038 u8 *data, u8 *range)
2042 /* If not specified, default mask is all bits in field */
2043 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2044 BIT(ice_flds_info[fld].size) - 1 :
2045 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2047 /* If the mask is 0, then we don't need to worry about this input
2048 * range checker value.
2052 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2054 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2055 u8 range_idx = info->entry.val;
2057 range_buf->checker_cfg[range_idx].low_boundary =
2058 CPU_TO_BE16(new_low);
2059 range_buf->checker_cfg[range_idx].high_boundary =
2060 CPU_TO_BE16(new_high);
2061 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2063 /* Indicate which range checker is being used */
2064 *range |= BIT(range_idx);
2069 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
2070 * @fld: number of the given field
2071 * @info: info about the field
2072 * @buf: buffer containing the entry
2073 * @dontcare: buffer containing don't care mask for entry
2074 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's bytes out of the user match buffer into the ACL entry
 * key (@buf), shifting by the bit displacement of the extraction, and builds
 * the corresponding inverted-mask bytes in @dontcare. Bits before the field's
 * start displacement and after its end are forced to don't-care.
2077 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2078 u8 *dontcare, u8 *data)
2080 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2081 bool use_mask = false;
2084 src = info->src.val;
2085 mask = info->src.mask;
2086 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
 /* Bit displacement of the field within its first byte */
2087 disp = info->xtrct.disp % BITS_PER_BYTE;
2089 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2092 for (k = 0; k < info->entry.last; k++, dst++) {
2093 /* Add overflow bits from previous byte */
2094 buf[dst] = (tmp_s & 0xff00) >> 8;
2096 /* If mask is not valid, tmp_m is always zero, so just setting
2097 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2098 * overflow bits of mask from prev byte
2100 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2102 /* If there is displacement, last byte will only contain
2103 * displaced data, but there is no more data to read from user
2104 * buffer, so skip so as not to potentially read beyond end of
2107 if (!disp || k < info->entry.last - 1) {
2108 /* Store shifted data to use in next byte */
2109 tmp_s = data[src++] << disp;
2111 /* Add current (shifted) byte */
2112 buf[dst] |= tmp_s & 0xff;
2114 /* Handle mask if valid */
2116 tmp_m = (~data[mask++] & 0xff) << disp;
2117 dontcare[dst] |= tmp_m & 0xff;
2122 /* Fill in don't care bits at beginning of field */
2124 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2125 for (k = 0; k < disp; k++)
2126 dontcare[dst] |= BIT(k);
 /* Bit position just past the field's last valid bit in the final byte */
2129 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2131 /* Fill in don't care bits at end of field */
2133 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2134 info->entry.last - 1;
2135 for (k = end_disp; k < BITS_PER_BYTE; k++)
2136 dontcare[dst] |= BIT(k);
2141 * ice_flow_acl_frmt_entry - Format acl entry
2142 * @hw: pointer to the hardware structure
2143 * @prof: pointer to flow profile
2144 * @e: pointer to the flow entry
2145 * @data: pointer to a data buffer containing flow entry's match values/masks
2146 * @acts: array of actions to be performed on a match
2147 * @acts_cnt: number of actions
2149 * Formats the key (and key_inverse) to be matched from the data passed in,
2150 * along with data from the flow profile. This key/key_inverse pair makes up
2151 * the 'entry' for an acl flow entry.
 *
 * On success, ownership of the allocated key and range buffer is transferred
 * to @e (e->entry, e->range_buf). On failure every allocation made here is
 * released before returning the error status.
2153 static enum ice_status
2154 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2155 struct ice_flow_entry *e, u8 *data,
2156 struct ice_flow_action *acts, u8 acts_cnt)
2158 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2159 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2160 enum ice_status status;
 /* Resolve the HW profile ID backing this ACL flow profile */
2165 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2169 /* Format the result action */
2171 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2175 status = ICE_ERR_NO_MEMORY;
 /* Keep a private copy of the caller's action array on the entry */
2177 e->acts = (struct ice_flow_action *)
2178 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2179 ICE_NONDMA_TO_NONDMA);
2184 e->acts_cnt = acts_cnt;
2186 /* Format the matching data */
2187 buf_sz = prof->cfg.scen->width;
2188 buf = (u8 *)ice_malloc(hw, buf_sz)
2192 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2196 /* 'key' buffer will store both key and key_inverse, so must be twice
2199 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2203 range_buf = (struct ice_aqc_acl_profile_ranges *)
2204 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2208 /* Set don't care mask to all 1's to start, will zero out used bytes */
2209 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
 /* Walk every matched field of every segment and format it into the key */
2211 for (i = 0; i < prof->segs_cnt; i++) {
2212 struct ice_flow_seg_info *seg = &prof->segs[i];
2213 u64 match = seg->match;
2216 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2217 struct ice_flow_fld_info *info;
2218 const u64 bit = BIT_ULL(j);
2223 info = &seg->fields[j];
 /* Range fields go to a HW range checker; others into the key */
2225 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2226 ice_flow_acl_frmt_entry_range(j, info,
2230 ice_flow_acl_frmt_entry_fld(j, info, buf,
 /* Raw (offset/length) fields are copied byte-for-byte */
2236 for (j = 0; j < seg->raws_cnt; j++) {
2237 struct ice_flow_fld_info *info = &seg->raws[j].info;
2238 u16 dst, src, mask, k;
2239 bool use_mask = false;
2241 src = info->src.val;
2242 dst = info->entry.val -
2243 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2244 mask = info->src.mask;
2246 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2249 for (k = 0; k < info->entry.last; k++, dst++) {
2250 buf[dst] = data[src++];
2252 dontcare[dst] = ~data[mask++];
 /* Match on the profile ID byte; it is never a don't-care */
2259 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2260 dontcare[prof->cfg.scen->pid_idx] = 0;
2262 /* Format the buffer for direction flags */
2263 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2265 if (prof->dir == ICE_FLOW_RX)
2266 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2269 buf[prof->cfg.scen->rng_chk_idx] = range;
2270 /* Mark any unused range checkers as don't care */
2271 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2272 e->range_buf = range_buf;
2274 ice_free(hw, range_buf);
 /* Build the final key/key_inverse pair from buf + dontcare */
2277 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2283 e->entry_sz = buf_sz * 2;
2290 ice_free(hw, dontcare);
 /* Error unwind: release everything handed to the entry so far */
2295 if (status && range_buf) {
2296 ice_free(hw, range_buf);
2297 e->range_buf = NULL;
2300 if (status && e->acts) {
2301 ice_free(hw, e->acts);
2306 if (status && cnt_alloc)
2307 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2313 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2314 * the compared data.
2315 * @prof: pointer to flow profile
2316 * @e: pointer to the comparing flow entry
2317 * @do_chg_action: decide if we want to change the ACL action
2318 * @do_add_entry: decide if we want to add the new ACL entry
2319 * @do_rem_entry: decide if we want to remove the current ACL entry
2321 * Find an ACL scenario entry that matches the compared data. At the same
2322 * time, this function also figures out:
2323 * a/ If we want to change the ACL action
2324 * b/ If we want to add the new ACL entry
2325 * c/ If we want to remove the current ACL entry
2327 static struct ice_flow_entry *
2328 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2329 struct ice_flow_entry *e, bool *do_chg_action,
2330 bool *do_add_entry, bool *do_rem_entry)
2332 struct ice_flow_entry *p, *return_entry = NULL;
2336 * a/ There exists an entry with same matching data, but different
2337 * priority, then we remove this existing ACL entry. Then, we
2338 * will add the new entry to the ACL scenario.
2339 * b/ There exists an entry with same matching data, priority, and
2340 * result action, then we do nothing
2341 * c/ There exists an entry with same matching data, priority, but
2342 * different action, then we only change the entry's action.
2343 * d/ Else, we add this new entry to the ACL scenario.
 /* Default decision: brand-new entry, nothing to remove or modify */
2345 *do_chg_action = false;
2346 *do_add_entry = true;
2347 *do_rem_entry = false;
2348 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2349 if (memcmp(p->entry, e->entry, p->entry_sz))
2352 /* From this point, we have the same matching_data. */
2353 *do_add_entry = false;
2356 if (p->priority != e->priority) {
2357 /* matching data && !priority */
2358 *do_add_entry = true;
2359 *do_rem_entry = true;
2363 /* From this point, we will have matching_data && priority */
2364 if (p->acts_cnt != e->acts_cnt)
2365 *do_chg_action = true;
 /* Any action of p that has no identical counterpart in e
  * means the result actions differ.
  */
2366 for (i = 0; i < p->acts_cnt; i++) {
2367 bool found_not_match = false;
2369 for (j = 0; j < e->acts_cnt; j++)
2370 if (memcmp(&p->acts[i], &e->acts[j],
2371 sizeof(struct ice_flow_action))) {
2372 found_not_match = true;
2376 if (found_not_match) {
2377 *do_chg_action = true;
2382 /* (do_chg_action = true) means :
2383 * matching_data && priority && !result_action
2384 * (do_chg_action = false) means :
2385 * matching_data && priority && result_action
2390 return return_entry;
2394 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
 * @p: generic flow priority to translate
 *
 * Maps the generic ice_flow_priority enum onto the ACL-specific
 * ice_acl_entry_prior enum; unknown values fall back to ICE_NORMAL.
2397 static enum ice_acl_entry_prior
2398 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2400 enum ice_acl_entry_prior acl_prior;
2403 case ICE_FLOW_PRIO_LOW:
2404 acl_prior = ICE_LOW;
2406 case ICE_FLOW_PRIO_NORMAL:
2407 acl_prior = ICE_NORMAL;
2409 case ICE_FLOW_PRIO_HIGH:
2410 acl_prior = ICE_HIGH;
 /* default: unrecognized priority maps to normal */
2413 acl_prior = ICE_NORMAL;
2421 * ice_flow_acl_union_rng_chk - Perform union operation between two
2422 * range-range checker buffers
2423 * @dst_buf: pointer to destination range checker buffer
2424 * @src_buf: pointer to source range checker buffer
2426 * For this function, we do the union between dst_buf and src_buf
2427 * range checker buffer, and we will save the result back to dst_buf
 *
 * Each source checker is merged into the first destination slot that is
 * either free (mask == 0) or already holds identical data. Returns
 * ICE_ERR_BAD_PTR on NULL input and ICE_ERR_MAX_LIMIT when no free slot
 * remains for a source checker.
2429 static enum ice_status
2430 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2431 struct ice_aqc_acl_profile_ranges *src_buf)
2435 if (!dst_buf || !src_buf)
2436 return ICE_ERR_BAD_PTR;
2438 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2439 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2440 bool will_populate = false;
2442 in_data = &src_buf->checker_cfg[i];
 /* Find a slot to place this source checker: a free one
  * (zero mask) or one with identical configuration.
  */
2447 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2448 cfg_data = &dst_buf->checker_cfg[j];
2450 if (!cfg_data->mask ||
2451 !memcmp(cfg_data, in_data,
2452 sizeof(struct ice_acl_rng_data))) {
2453 will_populate = true;
2458 if (will_populate) {
2459 ice_memcpy(cfg_data, in_data,
2460 sizeof(struct ice_acl_rng_data),
2461 ICE_NONDMA_TO_NONDMA);
2463 /* No available slot left to program range checker */
2464 return ICE_ERR_MAX_LIMIT;
2472 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2473 * @hw: pointer to the hardware structure
2474 * @prof: pointer to flow profile
2475 * @entry: double pointer to the flow entry
2477 * For this function, we will look at the current added entries in the
2478 * corresponding ACL scenario. Then, we will perform matching logic to
2479 * see if we want to add/modify/do nothing with this new entry.
 *
 * May replace *entry with an already-existing equivalent entry; in that case
 * the caller's new entry is deallocated here. Assumes the profile's entries
 * lock is held by the caller.
2481 static enum ice_status
2482 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2483 struct ice_flow_entry **entry)
2485 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2486 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2487 struct ice_acl_act_entry *acts = NULL;
2488 struct ice_flow_entry *exist;
2489 enum ice_status status = ICE_SUCCESS;
2490 struct ice_flow_entry *e;
2493 if (!entry || !(*entry) || !prof)
2494 return ICE_ERR_BAD_PTR;
2498 do_chg_rng_chk = false;
2502 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2507 /* Query the current range-checker value in FW */
2508 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2512 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2513 sizeof(struct ice_aqc_acl_profile_ranges),
2514 ICE_NONDMA_TO_NONDMA);
2516 /* Generate the new range-checker value */
2517 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2521 /* Reconfigure the range check if the buffer is changed. */
2522 do_chg_rng_chk = false;
2523 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2524 sizeof(struct ice_aqc_acl_profile_ranges))) {
2525 status = ice_prog_acl_prof_ranges(hw, prof_id,
2526 &cfg_rng_buf, NULL);
2530 do_chg_rng_chk = true;
2534 /* Figure out if we want to (change the ACL action) and/or
2535 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2537 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2538 &do_add_entry, &do_rem_entry);
2541 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2546 /* Prepare the result action buffer */
 /* NOTE(review): the element count passed here is e->entry_sz, yet only
  * e->acts_cnt elements are written below — looks like an over-allocation
  * (intended count is presumably e->acts_cnt); verify. Also confirm the
  * ice_calloc result is NULL-checked before the copies below.
  */
2547 acts = (struct ice_acl_act_entry *)ice_calloc
2548 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2549 for (i = 0; i < e->acts_cnt; i++)
2550 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2551 sizeof(struct ice_acl_act_entry),
2552 ICE_NONDMA_TO_NONDMA);
2555 enum ice_acl_entry_prior prior;
 /* e->entry holds key followed by key_inverse (each entry_sz/2) */
2559 keys = (u8 *)e->entry;
2560 inverts = keys + (e->entry_sz / 2);
2561 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2563 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2564 inverts, acts, e->acts_cnt,
2569 e->scen_entry_idx = entry_idx;
2570 LIST_ADD(&e->l_entry, &prof->entries);
2572 if (do_chg_action) {
2573 /* For the action memory info, update the SW's copy of
2574 * exist entry with e's action memory info
2576 ice_free(hw, exist->acts);
2577 exist->acts_cnt = e->acts_cnt;
2578 exist->acts = (struct ice_flow_action *)
2579 ice_calloc(hw, exist->acts_cnt,
2580 sizeof(struct ice_flow_action));
2583 status = ICE_ERR_NO_MEMORY;
2587 ice_memcpy(exist->acts, e->acts,
2588 sizeof(struct ice_flow_action) * e->acts_cnt,
2589 ICE_NONDMA_TO_NONDMA);
 /* Reprogram the HW action memory of the existing entry */
2591 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2593 exist->scen_entry_idx);
2598 if (do_chg_rng_chk) {
2599 /* In this case, we want to update the range checker
2600 * information of the exist entry
2602 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2608 /* As we don't add the new entry to our SW DB, deallocate its
2609 * memories, and return the exist entry to the caller
2611 ice_dealloc_flow_entry(hw, e);
2622 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2623 * @hw: pointer to the hardware structure
2624 * @prof: pointer to flow profile
2625 * @e: double pointer to the flow entry
 *
 * Locked wrapper around ice_flow_acl_add_scen_entry_sync(): takes the
 * profile's entries lock for the duration of the add. Note *e may be
 * replaced with an existing equivalent entry by the sync routine.
2627 static enum ice_status
2628 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2629 struct ice_flow_entry **e)
2631 enum ice_status status;
2633 ice_acquire_lock(&prof->entries_lock);
2634 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2635 ice_release_lock(&prof->entries_lock);
2641 * ice_flow_add_entry - Add a flow entry
2642 * @hw: pointer to the HW struct
2643 * @blk: classification stage
2644 * @prof_id: ID of the profile to add a new flow entry to
2645 * @entry_id: unique ID to identify this flow entry
2646 * @vsi_handle: software VSI handle for the flow entry
2647 * @prio: priority of the flow entry
2648 * @data: pointer to a data buffer containing flow entry's match values/masks
2649 * @acts: arrays of actions to be performed on a match
2650 * @acts_cnt: number of actions
2651 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * Validates inputs, associates the VSI with the named profile, formats the
 * entry per block type (RSS/ACL/...), and on success returns the entry
 * handle through @entry_h. Allocated entry memory is released on failure.
2654 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2655 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2656 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2659 struct ice_flow_prof *prof = NULL;
2660 struct ice_flow_entry *e = NULL;
2661 enum ice_status status = ICE_SUCCESS;
2663 /* ACL entries must indicate an action */
2664 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2665 return ICE_ERR_PARAM;
2667 /* No flow entry data is expected for RSS */
2668 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2669 return ICE_ERR_BAD_PTR;
2671 if (!ice_is_vsi_valid(hw, vsi_handle))
2672 return ICE_ERR_PARAM;
2674 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2676 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2678 status = ICE_ERR_DOES_NOT_EXIST;
2680 /* Allocate memory for the entry being added and associate
2681 * the VSI to the found flow profile
2683 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2685 status = ICE_ERR_NO_MEMORY;
2687 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2690 ice_release_lock(&hw->fl_profs_locks[blk]);
2695 e->vsi_handle = vsi_handle;
2701 /* RSS will add only one entry per VSI per profile */
2704 /* ACL will handle the entry management */
2705 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2710 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
 /* Unsupported block types are rejected */
2720 status = ICE_ERR_NOT_IMPL;
2724 if (blk != ICE_BLK_ACL) {
2725 /* ACL will handle the entry management */
2726 ice_acquire_lock(&prof->entries_lock);
2727 LIST_ADD(&e->l_entry, &prof->entries);
2728 ice_release_lock(&prof->entries_lock);
2731 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
 /* Error path: free the partially-built entry */
2736 ice_free(hw, e->entry);
2744 * ice_flow_rem_entry - Remove a flow entry
2745 * @hw: pointer to the HW struct
2746 * @blk: classification stage
2747 * @entry_h: handle to the flow entry to be removed
 *
 * Converts the handle back to an entry pointer and removes the entry under
 * the owning profile's entries lock. Returns ICE_ERR_PARAM for an invalid
 * handle.
2749 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2752 struct ice_flow_entry *entry;
2753 struct ice_flow_prof *prof;
2754 enum ice_status status = ICE_SUCCESS;
2756 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2757 return ICE_ERR_PARAM;
2759 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2761 /* Retain the pointer to the flow profile as the entry will be freed */
2765 ice_acquire_lock(&prof->entries_lock);
2766 status = ice_flow_rem_entry_sync(hw, blk, entry);
2767 ice_release_lock(&prof->entries_lock);
2774 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2775 * @seg: packet segment the field being set belongs to
2776 * @fld: field to be set
2777 * @field_type: type of the field
2778 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2779 * entry's input buffer
2780 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2782 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2783 * entry's input buffer
2785 * This helper function stores information of a field being matched, including
2786 * the type of the field and the locations of the value to match, the mask,
2787 * and the upper-bound value in the start of the input buffer for a flow entry.
2788 * This function should only be used for fixed-size data structures.
2790 * This function also opportunistically determines the protocol headers to be
2791 * present based on the fields being set. Some fields cannot be used alone to
2792 * determine the protocol headers present. Sometimes, fields for particular
2793 * protocol headers are not matched. In those cases, the protocol headers
2794 * must be explicitly set.
2797 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2798 enum ice_flow_fld_match_type field_type, u16 val_loc,
2799 u16 mask_loc, u16 last_loc)
2801 u64 bit = BIT_ULL(fld);
2804 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
 /* Record the field's type and source buffer offsets */
2807 seg->fields[fld].type = field_type;
2808 seg->fields[fld].src.val = val_loc;
2809 seg->fields[fld].src.mask = mask_loc;
2810 seg->fields[fld].src.last = last_loc;
 /* Implicitly require the protocol header this field belongs to */
2812 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2816 * ice_flow_set_fld - specifies locations of field from entry's input buffer
2817 * @seg: packet segment the field being set belongs to
2818 * @fld: field to be set
2819 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2820 * entry's input buffer
2821 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2823 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2824 * entry's input buffer
2825 * @range: indicate if field being matched is to be in a range
2827 * This function specifies the locations, in the form of byte offsets from the
2828 * start of the input buffer for a flow entry, from where the value to match,
2829 * the mask value, and upper value can be extracted. These locations are then
2830 * stored in the flow profile. When adding a flow entry associated with the
2831 * flow profile, these locations will be used to quickly extract the values and
2832 * create the content of a match entry. This function should only be used for
2833 * fixed-size data structures.
2836 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2837 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
 /* Range match uses a dedicated field type; everything else is regular */
2839 enum ice_flow_fld_match_type t = range ?
2840 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
2842 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
2846 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
2847 * @seg: packet segment the field being set belongs to
2848 * @fld: field to be set
2849 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2850 * entry's input buffer
2851 * @pref_loc: location of prefix value from entry's input buffer
2852 * @pref_sz: size of the location holding the prefix value
2854 * This function specifies the locations, in the form of byte offsets from the
2855 * start of the input buffer for a flow entry, from where the value to match
2856 * and the IPv4 prefix value can be extracted. These locations are then stored
2857 * in the flow profile. When adding flow entries to the associated flow profile,
2858 * these locations can be used to quickly extract the values to create the
2859 * content of a match entry. This function should only be used for fixed-size
2863 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2864 u16 val_loc, u16 pref_loc, u8 pref_sz)
2866 /* For this type of field, the "mask" location is for the prefix value's
2867 * location and the "last" location is for the size of the location of
2870 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
2871 pref_loc, (u16)pref_sz);
2875 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
2876 * @seg: packet segment the field being set belongs to
2877 * @off: offset of the raw field from the beginning of the segment in bytes
2878 * @len: length of the raw pattern to be matched
2879 * @val_loc: location of the value to match from entry's input buffer
2880 * @mask_loc: location of mask value from entry's input buffer
2882 * This function specifies the offset of the raw field to be match from the
2883 * beginning of the specified packet segment, and the locations, in the form of
2884 * byte offsets from the start of the input buffer for a flow entry, from where
2885 * the value to match and the mask value to be extracted. These locations are
2886 * then stored in the flow profile. When adding flow entries to the associated
2887 * flow profile, these locations can be used to quickly extract the values to
2888 * create the content of a match entry. This function should only be used for
2889 * fixed-size data structures.
2892 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
2893 u16 val_loc, u16 mask_loc)
 /* Only record the raw field while there is room in the raws array */
2895 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
2896 seg->raws[seg->raws_cnt].off = off;
2897 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
2898 seg->raws[seg->raws_cnt].info.src.val = val_loc;
2899 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
2900 /* The "last" field is used to store the length of the field */
2901 seg->raws[seg->raws_cnt].info.src.last = len;
2904 /* Overflows of "raws" will be handled as an error condition later in
2905 * the flow when this information is processed.
/* Protocol-header bitmasks accepted per layer in RSS segment configuration;
 * ICE_FLOW_RSS_SEG_HDR_VAL_MASKS is the union of all valid RSS headers.
 */
2910 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
2911 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
2913 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
2914 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
2916 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
2917 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
2918 ICE_FLOW_SEG_HDR_SCTP)
2920 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
2921 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
2922 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
2923 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
2926 * ice_flow_set_rss_seg_info - setup packet segments for RSS
2927 * @segs: pointer to the flow field segment(s)
2928 * @hash_fields: fields to be hashed on for the segment(s)
2929 * @flow_hdr: protocol header fields within a packet segment
2931 * Helper function to extract fields from hash bitmap and use flow
2932 * header value to set flow field segment for further use in flow
2933 * profile entry or removal.
 *
 * Returns ICE_ERR_PARAM when an unsupported header bit is set or when more
 * than one L3 (or L4) header is requested for the segment.
2935 static enum ice_status
2936 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
2939 u64 val = hash_fields;
 /* Mark each hashed field in the segment (no per-field offsets needed) */
2942 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
2943 u64 bit = BIT_ULL(i);
2946 ice_flow_set_fld(segs, (enum ice_flow_field)i,
2947 ICE_FLOW_FLD_OFF_INVAL,
2948 ICE_FLOW_FLD_OFF_INVAL,
2949 ICE_FLOW_FLD_OFF_INVAL, false);
2953 ICE_FLOW_SET_HDRS(segs, flow_hdr);
2955 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
2956 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
2957 return ICE_ERR_PARAM;
 /* At most one L3 header may be selected */
2959 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
2960 if (val && !ice_is_pow2(val))
 /* At most one L4 header may be selected */
2963 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
2964 if (val && !ice_is_pow2(val))
2971 * ice_rem_vsi_rss_list - remove VSI from RSS list
2972 * @hw: pointer to the hardware structure
2973 * @vsi_handle: software VSI handle
2975 * Remove the VSI from all RSS configurations in the list.
 * Configurations left with no associated VSIs are unlinked and freed.
2977 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
2979 struct ice_rss_cfg *r, *tmp;
2981 if (LIST_EMPTY(&hw->rss_list_head))
2984 ice_acquire_lock(&hw->rss_locks);
2985 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
2986 ice_rss_cfg, l_entry) {
 /* Drop the config entirely once its last VSI is cleared */
2987 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
2988 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
2989 LIST_DEL(&r->l_entry);
2993 ice_release_lock(&hw->rss_locks);
2997 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
2998 * @hw: pointer to the hardware structure
2999 * @vsi_handle: software VSI handle
3001 * This function will iterate through all flow profiles and disassociate
3002 * the VSI from that profile. If the flow profile has no VSIs it will
 * be removed as well.
3005 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3007 const enum ice_block blk = ICE_BLK_RSS;
3008 struct ice_flow_prof *p, *t;
3009 enum ice_status status = ICE_SUCCESS;
3011 if (!ice_is_vsi_valid(hw, vsi_handle))
3012 return ICE_ERR_PARAM;
3014 if (LIST_EMPTY(&hw->fl_profs[blk]))
3017 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3018 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3020 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3021 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
 /* Remove the profile once no VSI references it */
3025 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3026 status = ice_flow_rem_prof_sync(hw, blk, p);
3032 ice_release_lock(&hw->fl_profs_locks[blk]);
3038 * ice_rem_rss_list - remove RSS configuration from list
3039 * @hw: pointer to the hardware structure
3040 * @vsi_handle: software VSI handle
3041 * @prof: pointer to flow profile
3043 * Assumption: lock has already been acquired for RSS list
3046 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3048 struct ice_rss_cfg *r, *tmp;
3050 /* Search for RSS hash fields associated to the VSI that match the
3051 * hash configurations associated to the flow profile. If found
3052 * remove from the RSS entry list of the VSI context and delete entry.
3054 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3055 ice_rss_cfg, l_entry) {
 /* Match on the profile's outermost segment (hash fields + headers) */
3056 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3057 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3058 ice_clear_bit(vsi_handle, r->vsis);
3059 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3060 LIST_DEL(&r->l_entry);
3069 * ice_add_rss_list - add RSS configuration to list
3070 * @hw: pointer to the hardware structure
3071 * @vsi_handle: software VSI handle
3072 * @prof: pointer to flow profile
3074 * Assumption: lock has already been acquired for RSS list
 *
 * If a configuration with the same hash fields and packet headers already
 * exists, the VSI is simply added to it; otherwise a new list node is
 * allocated. Returns ICE_ERR_NO_MEMORY if allocation fails.
3076 static enum ice_status
3077 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3079 struct ice_rss_cfg *r, *rss_cfg;
3081 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3082 ice_rss_cfg, l_entry)
3083 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3084 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3085 ice_set_bit(vsi_handle, r->vsis);
 /* No existing config matched; create a new node from the profile's
  * outermost segment.
  */
3089 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3091 return ICE_ERR_NO_MEMORY;
3093 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3094 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3095 rss_cfg->symm = prof->cfg.symm;
3096 ice_set_bit(vsi_handle, rss_cfg->vsis);
3098 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit layout of a generated 64-bit RSS flow profile ID (see format below) */
3103 #define ICE_FLOW_PROF_HASH_S 0
3104 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3105 #define ICE_FLOW_PROF_HDR_S 32
3106 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3107 #define ICE_FLOW_PROF_ENCAP_S 63
3108 #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3110 #define ICE_RSS_OUTER_HEADERS 1
3111 #define ICE_RSS_INNER_HEADERS 2
3113 /* Flow profile ID format:
3114 * [0:31] - Packet match fields
3115 * [32:62] - Protocol header
3116 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 *
 * Note: the encap bit is set whenever segs_cnt != 1 — ((u8)((segs_cnt) - 1))
 * is non-zero for any multi-segment (tunneled) configuration.
3118 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3119 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3120 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3121 ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
/* Program one byte of the GLQF_HSYMM symmetric-hash register for @prof_id:
 * read-modify-write the byte slot selected by @src with @dst | 0x80.
 * NOTE(review): 0x80 presumably marks the HSYMM entry as valid/enabled —
 * confirm against the E810 register documentation.
 */
3124 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3126 u32 s = ((src % 4) << 3); /* byte shift */
3127 u32 v = dst | 0x80; /* value to program */
3128 u8 i = src / 4; /* register index */
3131 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3132 reg = (reg & ~(0xff << s)) | (v << s);
3133 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Cross-link @len field-vector words between @src and @dst in GLQF_HSYMM so
 * the hash XORs the two fields symmetrically (each side points at the other).
 */
3137 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3140 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3143 for (i = 0; i < len; i++) {
3144 ice_rss_config_xor_word(hw, prof_id,
3145 /* Yes, the field vector in GLQF_HSYMM and
3146 * GLQF_HINSET is inverted (indexed from the
 * last word)!
3148 fv_last_word - (src + i),
3149 fv_last_word - (dst + i));
3150 ice_rss_config_xor_word(hw, prof_id,
3151 fv_last_word - (dst + i),
3152 fv_last_word - (src + i));
/* Reprogram the symmetric-hash (GLQF_HSYMM) configuration for @prof: clear
 * all entries, then, if symmetric hashing is enabled, XOR-pair the SA/DA and
 * source/destination port fields that the profile's last segment extracts.
 * NOTE(review): the result of ice_search_prof_id() is dereferenced without a
 * visible NULL check — confirm the caller guarantees the profile is mapped.
 */
3157 ice_rss_update_symm(struct ice_hw *hw,
3158 struct ice_flow_prof *prof)
3160 struct ice_prof_map *map;
3163 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3164 prof_id = map->prof_id;
3166 /* clear to default */
3167 for (m = 0; m < 6; m++)
3168 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3169 if (prof->cfg.symm) {
3170 struct ice_flow_seg_info *seg =
3171 &prof->segs[prof->segs_cnt - 1];
3173 struct ice_flow_seg_xtrct *ipv4_src =
3174 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3175 struct ice_flow_seg_xtrct *ipv4_dst =
3176 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3177 struct ice_flow_seg_xtrct *ipv6_src =
3178 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3179 struct ice_flow_seg_xtrct *ipv6_dst =
3180 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3182 struct ice_flow_seg_xtrct *tcp_src =
3183 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3184 struct ice_flow_seg_xtrct *tcp_dst =
3185 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3187 struct ice_flow_seg_xtrct *udp_src =
3188 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3189 struct ice_flow_seg_xtrct *udp_dst =
3190 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3192 struct ice_flow_seg_xtrct *sctp_src =
3193 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3194 struct ice_flow_seg_xtrct *sctp_dst =
3195 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
 /* Pair each extracted src/dst couple; word counts: IPv4
  * addrs = 2, IPv6 addrs = 8, L4 ports = 1.
  */
3198 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3199 ice_rss_config_xor(hw, prof_id,
3200 ipv4_src->idx, ipv4_dst->idx, 2);
3203 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3204 ice_rss_config_xor(hw, prof_id,
3205 ipv6_src->idx, ipv6_dst->idx, 8);
3208 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3209 ice_rss_config_xor(hw, prof_id,
3210 tcp_src->idx, tcp_dst->idx, 1);
3213 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3214 ice_rss_config_xor(hw, prof_id,
3215 udp_src->idx, udp_dst->idx, 1);
3218 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3219 ice_rss_config_xor(hw, prof_id,
3220 sctp_src->idx, sctp_dst->idx, 1);
3225 * ice_add_rss_cfg_sync - add an RSS configuration
3226 * @hw: pointer to the hardware structure
3227 * @vsi_handle: software VSI handle
3228 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3229 * @addl_hdrs: protocol header fields
3230 * @segs_cnt: packet segment count
3231 * @symm: symmetric hash enable/disable
3233 * Assumption: lock has already been acquired for RSS list
3235 static enum ice_status
3236 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3237 u32 addl_hdrs, u8 segs_cnt, bool symm)
3239 const enum ice_block blk = ICE_BLK_RSS;
3240 struct ice_flow_prof *prof = NULL;
3241 struct ice_flow_seg_info *segs;
3242 enum ice_status status;
3244 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3245 return ICE_ERR_PARAM;
3247 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3250 return ICE_ERR_NO_MEMORY;
3252 /* Construct the packet segment info from the hashed fields */
3253 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3258 /* Search for a flow profile that has matching headers, hash fields
3259 * and has the input VSI associated to it. If found, no further
3260 * operations required and exit.
3262 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3264 ICE_FLOW_FIND_PROF_CHK_FLDS |
3265 ICE_FLOW_FIND_PROF_CHK_VSI);
3267 if (prof->cfg.symm == symm)
3269 prof->cfg.symm = symm;
3273 /* Check if a flow profile exists with the same protocol headers and
3274 * associated with the input VSI. If so disasscociate the VSI from
3275 * this profile. The VSI will be added to a new profile created with
3276 * the protocol header and new hash field configuration.
3278 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3279 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3281 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3283 ice_rem_rss_list(hw, vsi_handle, prof);
3287 /* Remove profile if it has no VSIs associated */
3288 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3289 status = ice_flow_rem_prof(hw, blk, prof->id);
3295 /* Search for a profile that has same match fields only. If this
3296 * exists then associate the VSI to this profile.
3298 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3300 ICE_FLOW_FIND_PROF_CHK_FLDS);
3302 if (prof->cfg.symm == symm) {
3303 status = ice_flow_assoc_prof(hw, blk, prof,
3306 status = ice_add_rss_list(hw, vsi_handle,
3309 /* if a profile exist but with different symmetric
3310 * requirement, just return error.
3312 status = ICE_ERR_NOT_SUPPORTED;
3317 /* Create a new flow profile with generated profile and packet
3318 * segment information.
3320 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3321 ICE_FLOW_GEN_PROFID(hashed_flds,
3322 segs[segs_cnt - 1].hdrs,
3324 segs, segs_cnt, NULL, 0, &prof);
3328 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3329 /* If association to a new flow profile failed then this profile can
3333 ice_flow_rem_prof(hw, blk, prof->id);
3337 status = ice_add_rss_list(hw, vsi_handle, prof);
3339 prof->cfg.symm = symm;
3342 ice_rss_update_symm(hw, prof);
3350 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3351 * @hw: pointer to the hardware structure
3352 * @vsi_handle: software VSI handle
3353 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3354 * @addl_hdrs: protocol header fields
3355 * @symm: symmetric hash enable/disable
3357 * This function will generate a flow profile based on fields associated with
3358 * the input fields to hash on, the flow type and use the VSI number to add
3359 * a flow entry to the profile.
3362 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3363 u32 addl_hdrs, bool symm)
3365 enum ice_status status;
3367 if (hashed_flds == ICE_HASH_INVALID ||
3368 !ice_is_vsi_valid(hw, vsi_handle))
3369 return ICE_ERR_PARAM;
3371 ice_acquire_lock(&hw->rss_locks);
3372 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3373 ICE_RSS_OUTER_HEADERS, symm);
3375 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3376 addl_hdrs, ICE_RSS_INNER_HEADERS,
3378 ice_release_lock(&hw->rss_locks);
3384 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3385 * @hw: pointer to the hardware structure
3386 * @vsi_handle: software VSI handle
3387 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3388 * @addl_hdrs: Protocol header fields within a packet segment
3389 * @segs_cnt: packet segment count
3391 * Assumption: lock has already been acquired for RSS list
3393 static enum ice_status
3394 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3395 u32 addl_hdrs, u8 segs_cnt)
3397 const enum ice_block blk = ICE_BLK_RSS;
3398 struct ice_flow_seg_info *segs;
3399 struct ice_flow_prof *prof;
3400 enum ice_status status;
3402 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3405 return ICE_ERR_NO_MEMORY;
3407 /* Construct the packet segment info from the hashed fields */
3408 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3413 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3415 ICE_FLOW_FIND_PROF_CHK_FLDS);
3417 status = ICE_ERR_DOES_NOT_EXIST;
3421 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3425 /* Remove RSS configuration from VSI context before deleting
3428 ice_rem_rss_list(hw, vsi_handle, prof);
3430 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3431 status = ice_flow_rem_prof(hw, blk, prof->id);
3439 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3440 * @hw: pointer to the hardware structure
3441 * @vsi_handle: software VSI handle
3442 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3443 * @addl_hdrs: Protocol header fields within a packet segment
3445 * This function will lookup the flow profile based on the input
3446 * hash field bitmap, iterate through the profile entry list of
3447 * that profile and find entry associated with input VSI to be
3448 * removed. Calls are made to underlying flow apis which will in
3449 * turn build or update buffers for RSS XLT1 section.
3452 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3455 enum ice_status status;
3457 if (hashed_flds == ICE_HASH_INVALID ||
3458 !ice_is_vsi_valid(hw, vsi_handle))
3459 return ICE_ERR_PARAM;
3461 ice_acquire_lock(&hw->rss_locks);
3462 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3463 ICE_RSS_OUTER_HEADERS);
3465 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3466 addl_hdrs, ICE_RSS_INNER_HEADERS);
3467 ice_release_lock(&hw->rss_locks);
3473 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3474 * @hw: pointer to the hardware structure
3475 * @vsi_handle: software VSI handle
3477 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3479 enum ice_status status = ICE_SUCCESS;
3480 struct ice_rss_cfg *r;
3482 if (!ice_is_vsi_valid(hw, vsi_handle))
3483 return ICE_ERR_PARAM;
3485 ice_acquire_lock(&hw->rss_locks);
3486 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3487 ice_rss_cfg, l_entry) {
3488 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3489 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3492 ICE_RSS_OUTER_HEADERS,
3496 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3499 ICE_RSS_INNER_HEADERS,
3505 ice_release_lock(&hw->rss_locks);
3511 * ice_get_rss_cfg - returns hashed fields for the given header types
3512 * @hw: pointer to the hardware structure
3513 * @vsi_handle: software VSI handle
3514 * @hdrs: protocol header type
3516 * This function will return the match fields of the first instance of flow
3517 * profile having the given header types and containing input VSI
3519 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3521 struct ice_rss_cfg *r, *rss_cfg = NULL;
3523 /* verify if the protocol header is non zero and VSI is valid */
3524 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3525 return ICE_HASH_INVALID;
3527 ice_acquire_lock(&hw->rss_locks);
3528 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3529 ice_rss_cfg, l_entry)
3530 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3531 r->packet_hdr == hdrs) {
3535 ice_release_lock(&hw->rss_locks);
3537 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;