1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_common.h"
8 /* Size of known protocol header fields */
/* All sizes below are given in bytes; the ICE_FLOW_FLD_INFO() initializers
 * multiply them by BITS_PER_BYTE when filling struct ice_flow_field_info.
 */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2
10 #define ICE_FLOW_FLD_SZ_VLAN 2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
13 #define ICE_FLOW_FLD_SZ_IP_DSCP 1
14 #define ICE_FLOW_FLD_SZ_IP_TTL 1
15 #define ICE_FLOW_FLD_SZ_IP_PROT 1
16 #define ICE_FLOW_FLD_SZ_PORT 2
17 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
18 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
19 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1
20 #define ICE_FLOW_FLD_SZ_ARP_OPER 2
21 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4
22 #define ICE_FLOW_FLD_SZ_GTP_TEID 4
23 #define ICE_FLOW_FLD_SZ_GTP_QFI 2
24 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
26 /* Describe properties of a protocol header field */
27 struct ice_flow_field_info {
28 enum ice_flow_seg_hdr hdr; /* protocol header the field belongs to */
29 s16 off; /* Offset from start of a protocol header, in bits */
30 u16 size; /* Size of fields in bits */
31 u16 mask; /* 16-bit mask for field */
/* Initializer for struct ice_flow_field_info with no explicit mask.
 * Offset and size arguments are in bytes; they are stored in bits.
 */
34 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
36 .off = (_offset_bytes) * BITS_PER_BYTE, \
37 .size = (_size_bytes) * BITS_PER_BYTE, \
/* Same as ICE_FLOW_FLD_INFO() but also records a 16-bit field mask, used
 * for sub-byte fields (DSCP, QFI) and fields sharing an extraction word
 * (TTL/protocol).
 */
41 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
43 .off = (_offset_bytes) * BITS_PER_BYTE, \
44 .size = (_size_bytes) * BITS_PER_BYTE, \
48 /* Table containing properties of supported protocol header fields */
/* Indexed by enum ice_flow_field (see the per-entry comments); offsets are
 * relative to the start of the protocol header named in each entry.
 */
50 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
52 /* ICE_FLOW_FIELD_IDX_ETH_DA */
53 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
54 /* ICE_FLOW_FIELD_IDX_ETH_SA */
55 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
56 /* ICE_FLOW_FIELD_IDX_S_VLAN */
57 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
58 /* ICE_FLOW_FIELD_IDX_C_VLAN */
59 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
60 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
63 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
64 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
66 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
67 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
69 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
/* TTL and protocol below use ICE_FLOW_SEG_HDR_NONE: they share one
 * extraction word, so the masks (0xff00/0x00ff) select each byte half.
 */
70 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
71 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
72 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
73 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
74 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
75 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
76 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
77 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
78 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
79 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
80 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
81 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
82 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
83 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
84 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
85 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
86 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
87 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
88 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
90 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
91 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
92 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
93 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
94 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
95 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
96 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
97 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
98 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
99 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
100 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
102 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
103 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
105 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
107 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
109 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
110 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
111 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
112 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
113 /* ICE_FLOW_FIELD_IDX_ARP_OP */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
116 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
118 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
121 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
122 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
124 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
126 ICE_FLOW_FLD_SZ_GTP_TEID),
127 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
128 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
129 ICE_FLOW_FLD_SZ_GTP_TEID),
130 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
131 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
132 ICE_FLOW_FLD_SZ_GTP_TEID),
133 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
134 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
135 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
136 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
138 ICE_FLOW_FLD_SZ_GTP_TEID),
139 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
141 ICE_FLOW_FLD_SZ_GTP_TEID),
143 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
144 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
145 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
148 /* Bitmaps indicating relevant packet types for a particular protocol header
/* Each array is a bitmap over hardware packet types (PTYPEs): bit (n % 32)
 * of word (n / 32) is set when PTYPE n contains the header in question.
 * ice_flow_proc_seg_hdrs() ANDs these into params->ptypes to narrow the set
 * of PTYPEs matching a flow profile.
 */
150 * Packet types for packets with an Outer/First/Single MAC header
152 static const u32 ice_ptypes_mac_ofos[] = {
153 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
154 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
155 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
156 0x00000000, 0x00000000, 0x00000000, 0x00000000,
157 0x00000000, 0x00000000, 0x00000000, 0x00000000,
158 0x00000000, 0x00000000, 0x00000000, 0x00000000,
159 0x00000000, 0x00000000, 0x00000000, 0x00000000,
160 0x00000000, 0x00000000, 0x00000000, 0x00000000,
163 /* Packet types for packets with an Innermost/Last MAC VLAN header */
164 static const u32 ice_ptypes_macvlan_il[] = {
165 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
166 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
167 0x00000000, 0x00000000, 0x00000000, 0x00000000,
168 0x00000000, 0x00000000, 0x00000000, 0x00000000,
169 0x00000000, 0x00000000, 0x00000000, 0x00000000,
170 0x00000000, 0x00000000, 0x00000000, 0x00000000,
171 0x00000000, 0x00000000, 0x00000000, 0x00000000,
172 0x00000000, 0x00000000, 0x00000000, 0x00000000,
175 /* Packet types for packets with an Outer/First/Single IPv4 header */
176 static const u32 ice_ptypes_ipv4_ofos[] = {
177 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
178 0x00000000, 0x00000000, 0x00000000, 0x00000000,
179 0x0003000F, 0x000FC000, 0x03E0F800, 0x00000000,
180 0x00000000, 0x00000000, 0x00000000, 0x00000000,
181 0x00000000, 0x00000000, 0x00000000, 0x00000000,
182 0x00000000, 0x00000000, 0x00000000, 0x00000000,
183 0x00000000, 0x00000000, 0x00000000, 0x00000000,
184 0x00000000, 0x00000000, 0x00000000, 0x00000000,
187 /* Packet types for packets with an Innermost/Last IPv4 header */
188 static const u32 ice_ptypes_ipv4_il[] = {
189 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
190 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
191 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
192 0x00000000, 0x00000000, 0x00000000, 0x00000000,
193 0x00000000, 0x00000000, 0x00000000, 0x00000000,
194 0x00000000, 0x00000000, 0x00000000, 0x00000000,
195 0x00000000, 0x00000000, 0x00000000, 0x00000000,
196 0x00000000, 0x00000000, 0x00000000, 0x00000000,
199 /* Packet types for packets with an Outer/First/Single IPv6 header */
200 static const u32 ice_ptypes_ipv6_ofos[] = {
201 0x00000000, 0x00000000, 0x77000000, 0x10002000,
202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
203 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000000,
204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 0x00000000, 0x00000000, 0x00000000, 0x00000000,
208 0x00000000, 0x00000000, 0x00000000, 0x00000000,
211 /* Packet types for packets with an Innermost/Last IPv6 header */
212 static const u32 ice_ptypes_ipv6_il[] = {
213 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
214 0x00000770, 0x00000000, 0x00000000, 0x00000000,
215 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
220 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmaps for ARP and the L4 protocols; same encoding as the L2/L3
 * bitmaps above (bit per PTYPE, 32 PTYPEs per word).
 */
223 /* Packet types for packets with an Outermost/First ARP header */
224 static const u32 ice_ptypes_arp_of[] = {
225 0x00000800, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 /* UDP Packet types for non-tunneled packets or tunneled
236 * packets with inner UDP.
238 static const u32 ice_ptypes_udp_il[] = {
239 0x81000000, 0x20204040, 0x04000010, 0x80810102,
240 0x00000040, 0x00000000, 0x00000000, 0x00000000,
241 0x00000000, 0x00410000, 0x10842000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 /* Packet types for packets with an Innermost/Last TCP header */
250 static const u32 ice_ptypes_tcp_il[] = {
251 0x04000000, 0x80810102, 0x10000040, 0x02040408,
252 0x00000102, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00820000, 0x21084000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
261 /* Packet types for packets with an Innermost/Last SCTP header */
262 static const u32 ice_ptypes_sctp_il[] = {
263 0x08000000, 0x01020204, 0x20000081, 0x04080810,
264 0x00000204, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x01040000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 /* Packet types for packets with an Outermost/First ICMP header */
274 static const u32 ice_ptypes_icmp_of[] = {
275 0x10000000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 /* Packet types for packets with an Innermost/Last ICMP header */
286 static const u32 ice_ptypes_icmp_il[] = {
287 0x00000000, 0x02040408, 0x40000102, 0x08101020,
288 0x00000408, 0x00000000, 0x00000000, 0x00000000,
289 0x00000000, 0x00000000, 0x42108000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmaps for tunnel headers (GRE, inner MAC, GTP-C); same encoding
 * as the bitmaps above.
 */
297 /* Packet types for packets with an Outermost/First GRE header */
298 static const u32 ice_ptypes_gre_of[] = {
299 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
300 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
309 /* Packet types for packets with an Innermost/Last MAC header */
/* NOTE(review): all-zero bitmap — ANDing it clears every PTYPE; presumably
 * no PTYPE currently carries an innermost MAC header. Confirm intent.
 */
310 static const u32 ice_ptypes_mac_il[] = {
311 0x00000000, 0x00000000, 0x00000000, 0x00000000,
312 0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 0x00000000, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x00000000, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
321 /* Packet types for GTPC */
322 static const u32 ice_ptypes_gtpc[] = {
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 0x00000000, 0x00000000, 0x00000180, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 0x00000000, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 /* Packet types for GTPC with TEID */
334 static const u32 ice_ptypes_gtpc_tid[] = {
335 0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000060, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* GTP-U PTYPE attribute tables: each pairs a GTP-U PTYPE with the packet
 * attribute (PDU extension header, downlink, or uplink) that
 * ice_flow_proc_seg_hdrs() records in params->attr/attr_cnt.
 */
345 /* Packet types for GTPU */
346 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
347 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
348 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
349 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
350 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
351 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
352 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
353 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
354 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
355 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
356 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
357 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
358 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
359 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
360 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
361 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
362 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
363 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
364 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
365 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
366 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
/* Same PTYPE list as ice_attr_gtpu_eh, tagged with the downlink attribute */
369 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
370 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
371 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
372 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
373 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
374 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
375 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
376 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
377 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
378 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
379 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
380 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
381 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
382 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
383 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
384 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
385 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
386 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
387 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
388 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
389 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
/* Same PTYPE list again, tagged with the uplink attribute */
392 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
393 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
394 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
395 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
396 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
397 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
398 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
399 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
400 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
401 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
402 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
403 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
404 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
405 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
406 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
407 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
408 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
409 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
410 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
411 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
412 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
/* PTYPE bitmap shared by all GTP-U header variants (IP/EH/uplink/downlink) */
415 static const u32 ice_ptypes_gtpu[] = {
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 0x00000000, 0x00000000, 0x00000000, 0x00000000,
418 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 0x00000000, 0x00000000, 0x00000000, 0x00000000,
422 0x00000000, 0x00000000, 0x00000000, 0x00000000,
423 0x00000000, 0x00000000, 0x00000000, 0x00000000,
426 /* Packet types for pppoe */
427 static const u32 ice_ptypes_pppoe[] = {
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
430 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
434 0x00000000, 0x00000000, 0x00000000, 0x00000000,
435 0x00000000, 0x00000000, 0x00000000, 0x00000000,
438 /* Manage parameters and info. used during the creation of a flow profile */
439 struct ice_flow_prof_params {
441 u16 entry_length; /* # of bytes formatted entry will require */
443 struct ice_flow_prof *prof; /* profile being built */
445 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
446 * This will give us the direction flags.
448 struct ice_fv_word es[ICE_MAX_FV_WORDS]; /* extraction sequence */
449 /* attributes can be used to add attributes to a particular PTYPE */
450 const struct ice_ptype_attributes *attr;
453 u16 mask[ICE_MAX_FV_WORDS]; /* per-extraction-word field mask */
454 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); /* matching PTYPEs */
/* Header groupings used when validating and classifying flow segments */
457 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
458 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
459 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU)
461 #define ICE_FLOW_SEG_HDRS_L2_MASK \
462 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
463 #define ICE_FLOW_SEG_HDRS_L3_MASK \
464 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
465 ICE_FLOW_SEG_HDR_ARP)
466 #define ICE_FLOW_SEG_HDRS_L4_MASK \
467 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
468 ICE_FLOW_SEG_HDR_SCTP)
471 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
472 * @segs: array of one or more packet segments that describe the flow
473 * @segs_cnt: number of packet segments provided
475 static enum ice_status
476 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
480 for (i = 0; i < segs_cnt; i++) {
481 /* Multiple L3 headers */
/* A segment may name at most one L3 and one L4 header: a non-zero,
 * non-power-of-two masked value means more than one bit is set.
 */
482 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
483 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
484 return ICE_ERR_PARAM;
486 /* Multiple L4 headers */
487 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
488 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
489 return ICE_ERR_PARAM;
495 /* Sizes of fixed known protocol headers without header options */
/* Sizes in bytes, used by ice_flow_calc_seg_sz() to accumulate a
 * segment's total header length.
 */
496 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
497 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
498 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
499 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
500 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
501 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
502 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
503 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
504 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
507 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
508 * @params: information about the flow to be processed
509 * @seg: index of packet segment whose header size is to be determined
/* Sums the fixed header sizes (ICE_FLOW_PROT_HDR_SZ_*) for the L2, L3 and
 * L4 headers present in segment @seg of params->prof.
 */
511 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2: MAC is always counted; add the VLAN tag size when present */
516 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
517 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3: at most one of IPv4/IPv6/ARP per segment (see ice_flow_val_hdrs) */
520 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
521 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
522 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
523 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
524 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
525 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
526 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
527 /* A L3 header is required if L4 is specified */
/* L4: at most one of ICMP/TCP/UDP/SCTP per segment */
531 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
532 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
533 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
534 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
535 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
536 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
537 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
538 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
544 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
545 * @params: information about the flow to be processed
547 * This function identifies the packet types associated with the protocol
548 * headers being present in packet segments of the specified flow profile.
/* Starts from an all-ones PTYPE bitmap and, for every segment, ANDs in the
 * per-header bitmap so only PTYPEs carrying every requested header remain.
 * For GTP-U variants it also records the matching attribute table in
 * params->attr/attr_cnt.
 */
550 static enum ice_status
551 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
553 struct ice_flow_prof *prof;
556 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
561 for (i = 0; i < params->prof->segs_cnt; i++) {
562 const ice_bitmap_t *src;
565 hdrs = prof->segs[i].hdrs;
567 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
/* segment 0 is the outer header, later segments are inner (_il) */
568 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
569 (const ice_bitmap_t *)ice_ptypes_mac_il;
570 ice_and_bitmap(params->ptypes, params->ptypes, src,
574 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
575 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
576 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* ARP is only supported as an outermost header (segment 0) */
580 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
581 ice_and_bitmap(params->ptypes, params->ptypes,
582 (const ice_bitmap_t *)ice_ptypes_arp_of,
586 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
587 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
588 ice_and_bitmap(params->ptypes, params->ptypes, src,
592 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
593 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
594 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
595 ice_and_bitmap(params->ptypes, params->ptypes, src,
597 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
598 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
599 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
600 ice_and_bitmap(params->ptypes, params->ptypes, src,
604 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
605 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
606 (const ice_bitmap_t *)ice_ptypes_icmp_il;
607 ice_and_bitmap(params->ptypes, params->ptypes, src,
609 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
610 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
611 ice_and_bitmap(params->ptypes, params->ptypes, src,
613 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
614 ice_and_bitmap(params->ptypes, params->ptypes,
615 (const ice_bitmap_t *)ice_ptypes_tcp_il,
617 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
618 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
619 ice_and_bitmap(params->ptypes, params->ptypes, src,
621 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
623 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
624 ice_and_bitmap(params->ptypes, params->ptypes,
625 src, ICE_FLOW_PTYPE_MAX);
627 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
628 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
629 ice_and_bitmap(params->ptypes, params->ptypes,
630 src, ICE_FLOW_PTYPE_MAX);
631 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
632 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
633 ice_and_bitmap(params->ptypes, params->ptypes,
634 src, ICE_FLOW_PTYPE_MAX);
635 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
636 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
637 ice_and_bitmap(params->ptypes, params->ptypes,
638 src, ICE_FLOW_PTYPE_MAX);
640 /* Attributes for GTP packet with downlink */
641 params->attr = ice_attr_gtpu_down;
642 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
643 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
644 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
645 ice_and_bitmap(params->ptypes, params->ptypes,
646 src, ICE_FLOW_PTYPE_MAX);
648 /* Attributes for GTP packet with uplink */
649 params->attr = ice_attr_gtpu_up;
650 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
651 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
652 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
653 ice_and_bitmap(params->ptypes, params->ptypes,
654 src, ICE_FLOW_PTYPE_MAX);
656 /* Attributes for GTP packet with Extension Header */
657 params->attr = ice_attr_gtpu_eh;
658 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
659 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
660 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
661 ice_and_bitmap(params->ptypes, params->ptypes,
662 src, ICE_FLOW_PTYPE_MAX);
670 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
671 * @hw: pointer to the HW struct
672 * @params: information about the flow to be processed
673 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
675 * This function will allocate an extraction sequence entries for a DWORD size
676 * chunk of the packet flags.
678 static enum ice_status
679 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
680 struct ice_flow_prof_params *params,
681 enum ice_flex_mdid_pkt_flags flags)
683 u8 fv_words = hw->blk[params->blk].es.fvw;
686 /* Make sure the number of extraction sequence entries required does not
687 * exceed the block's capacity.
689 if (params->es_cnt >= fv_words)
690 return ICE_ERR_MAX_LIMIT;
692 /* some blocks require a reversed field vector layout */
693 if (hw->blk[params->blk].es.reverse)
694 idx = fv_words - params->es_cnt - 1;
696 idx = params->es_cnt;
/* Metadata protocol ID; the flags enum doubles as the word offset */
698 params->es[idx].prot_id = ICE_PROT_META_ID;
699 params->es[idx].off = flags;
706 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
707 * @hw: pointer to the HW struct
708 * @params: information about the flow to be processed
709 * @seg: packet segment index of the field to be extracted
710 * @fld: ID of field to be extracted
711 * @match: bitfield of all fields
713 * This function determines the protocol ID, offset, and size of the given
714 * field. It then allocates one or more extraction sequence entries for the
715 * given field, and fill the entries with protocol ID and offset information.
717 static enum ice_status
718 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
719 u8 seg, enum ice_flow_field fld, u64 match)
721 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
722 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
723 u8 fv_words = hw->blk[params->blk].es.fvw;
724 struct ice_flow_fld_info *flds;
725 u16 cnt, ese_bits, i;
731 flds = params->prof->segs[seg].fields;
/* Map the flow field to a hardware protocol ID; outer (seg 0) and inner
 * segments use different protocol IDs for the same field.
 */
734 case ICE_FLOW_FIELD_IDX_ETH_DA:
735 case ICE_FLOW_FIELD_IDX_ETH_SA:
736 case ICE_FLOW_FIELD_IDX_S_VLAN:
737 case ICE_FLOW_FIELD_IDX_C_VLAN:
738 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
740 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
741 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
743 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
744 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
746 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
747 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
749 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
750 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
751 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
753 /* TTL and PROT share the same extraction seq. entry.
754 * Each is considered a sibling to the other in terms of sharing
755 * the same extraction sequence entry.
757 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
758 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
759 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
760 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
762 /* If the sibling field is also included, that field's
763 * mask needs to be included.
765 if (match & BIT(sib))
766 sib_mask = ice_flds_info[sib].mask;
768 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
769 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
770 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
772 /* TTL and PROT share the same extraction seq. entry.
773 * Each is considered a sibling to the other in terms of sharing
774 * the same extraction sequence entry.
776 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
777 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
778 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
779 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
781 /* If the sibling field is also included, that field's
782 * mask needs to be included.
784 if (match & BIT(sib))
785 sib_mask = ice_flds_info[sib].mask;
787 case ICE_FLOW_FIELD_IDX_IPV4_SA:
788 case ICE_FLOW_FIELD_IDX_IPV4_DA:
789 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
791 case ICE_FLOW_FIELD_IDX_IPV6_SA:
792 case ICE_FLOW_FIELD_IDX_IPV6_DA:
793 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
795 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
796 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
797 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
798 prot_id = ICE_PROT_TCP_IL;
800 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
801 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
802 prot_id = ICE_PROT_UDP_IL_OR_S;
804 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
805 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
806 prot_id = ICE_PROT_SCTP_IL;
808 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
809 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
810 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
811 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
812 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
813 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
814 /* GTP is accessed through UDP OF protocol */
815 prot_id = ICE_PROT_UDP_OF;
817 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
818 prot_id = ICE_PROT_PPPOE;
820 case ICE_FLOW_FIELD_IDX_ARP_SIP:
821 case ICE_FLOW_FIELD_IDX_ARP_DIP:
822 case ICE_FLOW_FIELD_IDX_ARP_SHA:
823 case ICE_FLOW_FIELD_IDX_ARP_DHA:
824 case ICE_FLOW_FIELD_IDX_ARP_OP:
825 prot_id = ICE_PROT_ARP_OF;
827 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
828 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
829 /* ICMP type and code share the same extraction seq. entry */
830 prot_id = (params->prof->segs[seg].hdrs &
831 ICE_FLOW_SEG_HDR_IPV4) ?
832 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
833 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
834 ICE_FLOW_FIELD_IDX_ICMP_CODE :
835 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
837 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
838 prot_id = ICE_PROT_GRE_OF;
841 return ICE_ERR_NOT_IMPL;
844 /* Each extraction sequence entry is a word in size, and extracts a
845 * word-aligned offset from a protocol header.
847 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record where/how this field is extracted: word-aligned byte offset,
 * bit displacement within the word, starting entry index, and mask.
 */
849 flds[fld].xtrct.prot_id = prot_id;
850 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
851 ICE_FLOW_FV_EXTRACT_SZ;
852 flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
853 flds[fld].xtrct.idx = params->es_cnt;
854 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
856 /* Adjust the next field-entry index after accommodating the number of
857 * entries this field consumes
859 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
860 ice_flds_info[fld].size, ese_bits);
862 /* Fill in the extraction sequence entries needed for this field */
863 off = flds[fld].xtrct.off;
864 mask = flds[fld].xtrct.mask;
865 for (i = 0; i < cnt; i++) {
866 /* Only consume an extraction sequence entry if there is no
867 * sibling field associated with this field or the sibling entry
868 * already extracts the word shared with this field.
870 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
871 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
872 flds[sib].xtrct.off != off) {
875 /* Make sure the number of extraction sequence required
876 * does not exceed the block's capability
878 if (params->es_cnt >= fv_words)
879 return ICE_ERR_MAX_LIMIT;
881 /* some blocks require a reversed field vector layout */
882 if (hw->blk[params->blk].es.reverse)
883 idx = fv_words - params->es_cnt - 1;
885 idx = params->es_cnt;
/* Sibling's mask is ORed in so the shared word covers both fields */
887 params->es[idx].prot_id = prot_id;
888 params->es[idx].off = off;
889 params->mask[idx] = mask | sib_mask;
893 off += ICE_FLOW_FV_EXTRACT_SZ;
900 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
901 * @hw: pointer to the HW struct
902 * @params: information about the flow to be processed
903 * @seg: index of packet segment whose raw fields are to be be extracted
/* Builds field-vector extraction entries for every raw (byte-offset based)
 * match field of packet segment @seg. Raw bytes are always extracted through
 * the outer MAC protocol (ICE_PROT_MAC_OF_OR_S) at word-aligned offsets.
 * Returns ICE_ERR_MAX_LIMIT when the raw count or the required extraction
 * entries exceed the block's capacity, ICE_ERR_PARAM on a bad segment size.
 * NOTE(review): this listing elides some lines (returns/braces/else arms);
 * verify against the complete source before modifying.
 */
905 static enum ice_status
906 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
913 if (!params->prof->segs[seg].raws_cnt)
916 if (params->prof->segs[seg].raws_cnt >
917 ARRAY_SIZE(params->prof->segs[seg].raws))
918 return ICE_ERR_MAX_LIMIT;
920 /* Offsets within the segment headers are not supported */
921 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
923 return ICE_ERR_PARAM;
925 fv_words = hw->blk[params->blk].es.fvw;
927 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
928 struct ice_flow_seg_fld_raw *raw;
/* NOTE(review): '¶ms' below is entity-garbled '&params' in this listing */
931 raw = ¶ms->prof->segs[seg].raws[i];
933 /* Storing extraction information */
934 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
/* Round the raw byte offset down to a word-aligned extraction offset;
 * the residue becomes the bit displacement within that word.
 */
935 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
936 ICE_FLOW_FV_EXTRACT_SZ;
937 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
939 raw->info.xtrct.idx = params->es_cnt;
941 /* Determine the number of field vector entries this raw field
944 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
945 (raw->info.src.last * BITS_PER_BYTE),
946 (ICE_FLOW_FV_EXTRACT_SZ *
948 off = raw->info.xtrct.off;
949 for (j = 0; j < cnt; j++) {
952 /* Make sure the number of extraction sequence required
953 * does not exceed the block's capability
955 if (params->es_cnt >= hw->blk[params->blk].es.count ||
956 params->es_cnt >= ICE_MAX_FV_WORDS)
957 return ICE_ERR_MAX_LIMIT;
959 /* some blocks require a reversed field vector layout */
960 if (hw->blk[params->blk].es.reverse)
961 idx = fv_words - params->es_cnt - 1;
963 idx = params->es_cnt;
965 params->es[idx].prot_id = raw->info.xtrct.prot_id;
966 params->es[idx].off = off;
968 off += ICE_FLOW_FV_EXTRACT_SZ;
976 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
977 * @hw: pointer to the HW struct
978 * @params: information about the flow to be processed
980 * This function iterates through all matched fields in the given segments, and
981 * creates an extraction sequence for the fields.
/* Walks every matched field bit of every segment in the profile and builds
 * the extraction sequence via ice_flow_xtract_fld(), then appends entries
 * for raw match bytes via ice_flow_xtract_raws(). For the ACL block it
 * first extracts the Rx/Tx direction metadata flags.
 * NOTE(review): elided lines here likely clear 'bit' from 'match' and
 * check 'status' between steps — confirm against the full source.
 */
983 static enum ice_status
984 ice_flow_create_xtrct_seq(struct ice_hw *hw,
985 struct ice_flow_prof_params *params)
987 enum ice_status status = ICE_SUCCESS;
990 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
993 if (params->blk == ICE_BLK_ACL) {
994 status = ice_flow_xtract_pkt_flags(hw, params,
995 ICE_RX_MDID_PKT_FLAGS_15_0);
1000 for (i = 0; i < params->prof->segs_cnt; i++) {
1001 u64 match = params->prof->segs[i].match;
1002 enum ice_flow_field j;
1004 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1005 const u64 bit = BIT_ULL(j);
1008 status = ice_flow_xtract_fld(hw, params, i, j,
1016 /* Process raw matching bytes */
1017 status = ice_flow_xtract_raws(hw, params, i);
1026 * ice_flow_sel_acl_scen - returns the specific scenario
1027 * @hw: pointer to the hardware structure
1028 * @params: information about the flow to be processed
1030 * This function will return the specific scenario based on the
1031 * params passed to it
/* Selects the narrowest ACL scenario whose effective width can still hold
 * the computed entry length (best-fit). Stores the winner in
 * params->prof->cfg.scen; returns ICE_ERR_DOES_NOT_EXIST when the ACL
 * table is absent or no scenario is wide enough.
 */
1033 static enum ice_status
1034 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1036 /* Find the best-fit scenario for the provided match width */
1037 struct ice_acl_scen *cand_scen = NULL, *scen;
1040 return ICE_ERR_DOES_NOT_EXIST;
1042 /* Loop through each scenario and match against the scenario width
1043 * to select the specific scenario
1045 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
/* Keep the candidate with the smallest eff_width that still fits */
1046 if (scen->eff_width >= params->entry_length &&
1047 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1049 return ICE_ERR_DOES_NOT_EXIST;
1052 params->prof->cfg.scen = cand_scen;
1058 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1059 * @params: information about the flow to be processed
/* Lays out the ACL entry format: assigns a byte-selection index (starting
 * at ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX) to each matched field and each
 * raw field, and a range-checker slot to each RANGE-type field. Rejects
 * layouts exceeding the byte-selection capacity or the per-profile range
 * checker count, and stores the resulting entry length in params.
 * NOTE(review): this listing elides some lines (e.g. match-bit skip and
 * closing braces); verify against the full source before editing.
 */
1061 static enum ice_status
1062 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1064 u16 index, i, range_idx = 0;
1066 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1068 for (i = 0; i < params->prof->segs_cnt; i++) {
/* NOTE(review): '¶ms' below is entity-garbled '&params' in this listing */
1069 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1070 u64 match = seg->match;
1073 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1074 struct ice_flow_fld_info *fld;
1075 const u64 bit = BIT_ULL(j);
1080 fld = &seg->fields[j];
1081 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1083 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1084 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1086 /* Range checking only supported for single
/* Range checkers operate on one 16-bit word */
1089 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1091 BITS_PER_BYTE * 2) > 1)
1092 return ICE_ERR_PARAM;
1094 /* Ranges must define low and high values */
1095 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1096 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1097 return ICE_ERR_PARAM;
1099 fld->entry.val = range_idx++;
1101 /* Store adjusted byte-length of field for later
1102 * use, taking into account potential
1103 * non-byte-aligned displacement
1105 fld->entry.last = DIVIDE_AND_ROUND_UP
1106 (ice_flds_info[j].size +
1107 (fld->xtrct.disp % BITS_PER_BYTE),
1109 fld->entry.val = index;
1110 index += fld->entry.last;
1116 for (j = 0; j < seg->raws_cnt; j++) {
1117 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1119 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1120 raw->info.entry.val = index;
1121 raw->info.entry.last = raw->info.src.last;
1122 index += raw->info.entry.last;
1126 /* Currently only support using the byte selection base, which only
1127 * allows for an effective entry size of 30 bytes. Reject anything
1130 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1131 return ICE_ERR_PARAM;
1133 /* Only 8 range checkers per profile, reject anything trying to use
1136 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1137 return ICE_ERR_PARAM;
1139 /* Store # bytes required for entry for later use */
1140 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1146 * ice_flow_proc_segs - process all packet segments associated with a profile
1147 * @hw: pointer to the HW struct
1148 * @params: information about the flow to be processed
/* Top-level segment processing: resolves protocol headers, creates the
 * extraction sequence, then applies per-block post-processing (the ACL
 * branch additionally formats the entry layout and selects a scenario).
 * Unknown blocks yield ICE_ERR_NOT_IMPL.
 */
1150 static enum ice_status
1151 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1153 enum ice_status status;
1155 status = ice_flow_proc_seg_hdrs(params);
1159 status = ice_flow_create_xtrct_seq(hw, params);
1163 switch (params->blk) {
1166 status = ICE_SUCCESS;
1169 status = ice_flow_acl_def_entry_frmt(params);
1172 status = ice_flow_sel_acl_scen(hw, params);
1178 return ICE_ERR_NOT_IMPL;
1184 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1185 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1186 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1189 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1190 * @hw: pointer to the HW struct
1191 * @blk: classification stage
1192 * @dir: flow direction
1193 * @segs: array of one or more packet segments that describe the flow
1194 * @segs_cnt: number of packet segments provided
1195 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1196 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
/* Searches hw->fl_profs[blk] (under the per-block lock) for a profile whose
 * direction, segment count, per-segment headers and — when
 * ICE_FLOW_FIND_PROF_CHK_FLDS is set — matched fields all agree with the
 * caller's segments; optionally also requires the VSI association
 * (ICE_FLOW_FIND_PROF_CHK_VSI). Returns the profile or NULL.
 */
1198 static struct ice_flow_prof *
1199 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1200 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1201 u8 segs_cnt, u16 vsi_handle, u32 conds)
1203 struct ice_flow_prof *p, *prof = NULL;
1205 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1206 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1207 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1208 segs_cnt && segs_cnt == p->segs_cnt) {
1211 /* Check for profile-VSI association if specified */
1212 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1213 ice_is_vsi_valid(hw, vsi_handle) &&
1214 !ice_is_bit_set(p->vsis, vsi_handle))
1217 /* Protocol headers must be checked. Matched fields are
1218 * checked if specified.
1220 for (i = 0; i < segs_cnt; i++)
1221 if (segs[i].hdrs != p->segs[i].hdrs ||
1222 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1223 segs[i].match != p->segs[i].match))
1226 /* A match is found if all segments are matched */
1227 if (i == segs_cnt) {
1233 ice_release_lock(&hw->fl_profs_locks[blk]);
1239 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1240 * @hw: pointer to the HW struct
1241 * @blk: classification stage
1242 * @dir: flow direction
1243 * @segs: array of one or more packet segments that describe the flow
1244 * @segs_cnt: number of packet segments provided
/* Convenience wrapper: look up a profile by headers + matched fields
 * (fields always checked, VSI ignored) and return its ID, or
 * ICE_FLOW_PROF_ID_INVAL when no profile matches.
 */
1247 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1248 struct ice_flow_seg_info *segs, u8 segs_cnt)
1250 struct ice_flow_prof *p;
1252 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1253 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1255 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1259 * ice_flow_find_prof_id - Look up a profile with given profile ID
1260 * @hw: pointer to the HW struct
1261 * @blk: classification stage
1262 * @prof_id: unique ID to identify this flow profile
/* Linear scan of hw->fl_profs[blk] for a profile with the given ID.
 * NOTE(review): caller is expected to hold the per-block profile lock —
 * no locking is visible here; confirm against callers.
 */
1264 static struct ice_flow_prof *
1265 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1267 struct ice_flow_prof *p;
1269 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1270 if (p->id == prof_id)
1278 * ice_dealloc_flow_entry - Deallocate flow entry memory
1279 * @hw: pointer to the HW struct
1280 * @entry: flow entry to be removed
/* Frees all memory owned by a flow entry: the formatted entry buffer, the
 * range-checker buffer, the copied actions, and finally the entry itself.
 * NOTE(review): the NULL checks guarding these frees are elided in this
 * listing; verify against the full source.
 */
1283 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1289 ice_free(hw, entry->entry);
1291 if (entry->range_buf) {
1292 ice_free(hw, entry->range_buf);
1293 entry->range_buf = NULL;
1297 ice_free(hw, entry->acts);
1299 entry->acts_cnt = 0;
1302 ice_free(hw, entry);
1305 #define ICE_ACL_INVALID_SCEN 0x3f
1308 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1309 * @hw: pointer to the hardware structure
1310 * @prof: pointer to flow profile
1311 * @buf: destination buffer function writes partial xtrct sequence to
1313 * returns ICE_SUCCESS if no pf is associated to the given profile
1314 * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1315 * returns other error code for real error
/* Queries the HW ACL profile and inspects its eight per-PF scenario
 * numbers: all-zero or all-ICE_ACL_INVALID_SCEN means "not in use"
 * (ICE_SUCCESS); any other pattern means some PF owns it (ICE_ERR_IN_USE).
 * The queried format is returned to the caller through @buf.
 */
1317 static enum ice_status
1318 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1319 struct ice_aqc_acl_prof_generic_frmt *buf)
1321 enum ice_status status;
1324 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1328 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1332 /* If all pf's associated scenarios are all 0 or all
1333 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1334 * not been configured yet.
1336 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1337 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1338 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1339 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1342 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1343 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1344 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1345 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1346 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1347 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1348 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1349 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1352 return ICE_ERR_IN_USE;
1356 * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1357 * @hw: pointer to the hardware structure
1358 * @acts: array of actions to be performed on a match
1359 * @acts_cnt: number of actions
/* Releases ACL counters previously allocated for counter-type actions
 * (packet, byte, or dual) by issuing a dealloc AQ command per matching
 * action. Only counter bank 0 is used.
 * NOTE(review): lines assigning cntrs.first_cntr/last_cntr from the action
 * value appear partially elided here; verify against the full source.
 */
1361 static enum ice_status
1362 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1367 for (i = 0; i < acts_cnt; i++) {
1368 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1369 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1370 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1371 struct ice_acl_cntrs cntrs;
1372 enum ice_status status;
1374 cntrs.bank = 0; /* Only bank0 for the moment */
1376 LE16_TO_CPU(acts[i].data.acl_act.value);
1378 LE16_TO_CPU(acts[i].data.acl_act.value);
1380 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1381 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1383 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1385 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1394 * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
1395 * @hw: pointer to the hardware structure
1396 * @prof: pointer to flow profile
1398 * Disassociate the scenario to the Profile for the PF of the VSI.
/* Detaches this PF from the profile's scenario: reads the profile's generic
 * format from HW, writes ICE_ACL_INVALID_SCEN into this PF's slot, and
 * programs the updated format back.
 */
1400 static enum ice_status
1401 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1403 struct ice_aqc_acl_prof_generic_frmt buf;
1404 enum ice_status status = ICE_SUCCESS;
1407 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1409 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1413 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1417 /* Clear scenario for this pf */
1418 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1419 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1425 * ice_flow_rem_entry_sync - Remove a flow entry
1426 * @hw: pointer to the HW struct
1427 * @blk: classification stage
1428 * @entry: flow entry to be removed
/* Removes a flow entry: for the ACL block it first deletes the HW scenario
 * entry and frees any ACL counters tied to the entry's actions, then
 * unlinks the entry from its profile's list and frees its memory.
 * Assumes the caller holds the appropriate entry-list lock.
 */
1430 static enum ice_status
1431 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1432 struct ice_flow_entry *entry)
1435 return ICE_ERR_BAD_PTR;
1437 if (blk == ICE_BLK_ACL) {
1438 enum ice_status status;
1441 return ICE_ERR_BAD_PTR;
1443 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1444 entry->scen_entry_idx);
1448 /* Checks if we need to release an ACL counter. */
1449 if (entry->acts_cnt && entry->acts)
1450 ice_flow_acl_free_act_cntr(hw, entry->acts,
1454 LIST_DEL(&entry->l_entry);
1456 ice_dealloc_flow_entry(hw, entry);
1462 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1463 * @hw: pointer to the HW struct
1464 * @blk: classification stage
1465 * @dir: flow direction
1466 * @prof_id: unique ID to identify this flow profile
1467 * @segs: array of one or more packet segments that describe the flow
1468 * @segs_cnt: number of packet segments provided
1469 * @acts: array of default actions
1470 * @acts_cnt: number of default actions
1471 * @prof: stores the returned flow profile added
1473 * Assumption: the caller has acquired the lock to the profile list
/* Allocates and initializes a flow profile: copies segments and actions
 * into the new profile, initializes the extraction sequence to invalid,
 * processes the segments, adds the corresponding HW profile, and returns
 * the profile through @prof. On any failure frees the partially built
 * profile (goto-cleanup tail visible at the end).
 * Assumption: caller holds the profile-list lock.
 * NOTE(review): listing elides NULL checks / goto labels; verify against
 * the full source. '¶ms' is entity-garbled '&params'.
 */
1475 static enum ice_status
1476 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1477 enum ice_flow_dir dir, u64 prof_id,
1478 struct ice_flow_seg_info *segs, u8 segs_cnt,
1479 struct ice_flow_action *acts, u8 acts_cnt,
1480 struct ice_flow_prof **prof)
1482 struct ice_flow_prof_params params;
1483 enum ice_status status;
1486 if (!prof || (acts_cnt && !acts))
1487 return ICE_ERR_BAD_PTR;
1489 ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
1490 params.prof = (struct ice_flow_prof *)
1491 ice_malloc(hw, sizeof(*params.prof));
1493 return ICE_ERR_NO_MEMORY;
1495 /* initialize extraction sequence to all invalid (0xff) */
1496 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1497 params.es[i].prot_id = ICE_PROT_INVALID;
1498 params.es[i].off = ICE_FV_OFFSET_INVAL;
1502 params.prof->id = prof_id;
1503 params.prof->dir = dir;
1504 params.prof->segs_cnt = segs_cnt;
1506 /* Make a copy of the segments that need to be persistent in the flow
1509 for (i = 0; i < segs_cnt; i++)
1510 ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs),
1511 ICE_NONDMA_TO_NONDMA);
1513 /* Make a copy of the actions that need to be persistent in the flow
1517 params.prof->acts = (struct ice_flow_action *)
1518 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1519 ICE_NONDMA_TO_NONDMA);
1521 if (!params.prof->acts) {
1522 status = ICE_ERR_NO_MEMORY;
1527 status = ice_flow_proc_segs(hw, ¶ms);
1529 ice_debug(hw, ICE_DBG_FLOW,
1530 "Error processing a flow's packet segments\n");
1534 /* Add a HW profile for this flow profile */
1535 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1536 params.attr, params.attr_cnt, params.es,
1539 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1543 INIT_LIST_HEAD(¶ms.prof->entries);
1544 ice_init_lock(¶ms.prof->entries_lock);
1545 *prof = params.prof;
/* Error path: release the partially constructed profile */
1549 if (params.prof->acts)
1550 ice_free(hw, params.prof->acts);
1551 ice_free(hw, params.prof);
1558 * ice_flow_rem_prof_sync - remove a flow profile
1559 * @hw: pointer to the hardware structure
1560 * @blk: classification stage
1561 * @prof: pointer to flow profile to remove
1563 * Assumption: the caller has acquired the lock to the profile list
/* Tears down a flow profile: removes all remaining entries (under the
 * entry-list lock), for ACL disassociates the scenario and clears the
 * range checkers if no PF still uses the profile, removes the HW
 * profiles, unlinks the profile, and frees its actions and memory.
 * Assumption: caller holds the profile-list lock.
 */
1565 static enum ice_status
1566 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1567 struct ice_flow_prof *prof)
1569 enum ice_status status;
1571 /* Remove all remaining flow entries before removing the flow profile */
1572 if (!LIST_EMPTY(&prof->entries)) {
1573 struct ice_flow_entry *e, *t;
1575 ice_acquire_lock(&prof->entries_lock);
1577 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1579 status = ice_flow_rem_entry_sync(hw, blk, e);
1584 ice_release_lock(&prof->entries_lock);
1587 if (blk == ICE_BLK_ACL) {
1588 struct ice_aqc_acl_profile_ranges query_rng_buf;
1589 struct ice_aqc_acl_prof_generic_frmt buf;
1592 /* Deassociate the scenario to the Profile for the PF */
1593 status = ice_flow_acl_disassoc_scen(hw, prof);
1597 /* Clear the range-checker if the profile ID is no longer
1600 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1601 if (status && status != ICE_ERR_IN_USE) {
1603 } else if (!status) {
1604 /* Clear the range-checker value for profile ID */
1605 ice_memset(&query_rng_buf, 0,
1606 sizeof(struct ice_aqc_acl_profile_ranges),
1609 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1614 status = ice_prog_acl_prof_ranges(hw, prof_id,
1615 &query_rng_buf, NULL);
1621 /* Remove all hardware profiles associated with this flow profile */
1622 status = ice_rem_prof(hw, blk, prof->id);
1624 LIST_DEL(&prof->l_entry);
1625 ice_destroy_lock(&prof->entries_lock);
1627 ice_free(hw, prof->acts);
1635 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1636 * @buf: Destination buffer function writes partial xtrct sequence to
1637 * @info: Info about field
/* Writes the byte-selection indices for one field into the ACL generic
 * format buffer. Source bytes start at the field's FV word (idx *
 * extract size) plus its byte displacement; the '^ 1' swaps the two bytes
 * of each LE field-vector word so entries come out in network order.
 */
1640 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1641 struct ice_flow_fld_info *info)
1646 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1647 info->xtrct.disp / BITS_PER_BYTE;
1648 dst = info->entry.val;
1649 for (i = 0; i < info->entry.last; i++)
1650 /* HW stores field vector words in LE, convert words back to BE
1651 * so constructed entries will end up in network order
1653 buf->byte_selection[dst++] = src++ ^ 1;
1657 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1658 * @hw: pointer to the hardware structure
1659 * @prof: pointer to flow profile
/* Programs the ACL extraction sequence for a profile. The profile-wide
 * byte/word selections are written only if no PF has configured the
 * profile yet (ICE_ERR_IN_USE from the in-use check means skip that
 * one-time setup); this PF's scenario number is then always updated.
 * NOTE(review): elided lines likely skip unmatched bits and handle the
 * RANGE word-selection value; verify against the full source.
 */
1661 static enum ice_status
1662 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1664 struct ice_aqc_acl_prof_generic_frmt buf;
1665 struct ice_flow_fld_info *info;
1666 enum ice_status status;
1670 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1672 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1676 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1677 if (status && status != ICE_ERR_IN_USE)
1681 /* Program the profile dependent configuration. This is done
1682 * only once regardless of the number of PFs using that profile
1684 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1686 for (i = 0; i < prof->segs_cnt; i++) {
1687 struct ice_flow_seg_info *seg = &prof->segs[i];
1688 u64 match = seg->match;
1691 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1692 const u64 bit = BIT_ULL(j);
1697 info = &seg->fields[j];
1699 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1700 buf.word_selection[info->entry.val] =
1703 ice_flow_acl_set_xtrct_seq_fld(&buf,
1709 for (j = 0; j < seg->raws_cnt; j++) {
1710 info = &seg->raws[j].info;
1711 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Default every PF's scenario slot to "invalid" before claiming ours */
1715 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1716 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1720 /* Update the current PF */
1721 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1722 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1728 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1729 * @hw: pointer to the hardware structure
1730 * @blk: classification stage
1731 * @vsi_handle: software VSI handle
1732 * @vsig: target VSI group
1734 * Assumption: the caller has already verified that the VSI to
1735 * be added has the same characteristics as the VSIG and will
1736 * thereby have access to all resources added to that VSIG.
/* Validates the VSI handle and block, then (under the per-block profile
 * lock) adds the HW VSI number to the target VSIG via ice_add_vsi_flow().
 */
1739 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1742 enum ice_status status;
1744 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1745 return ICE_ERR_PARAM;
1747 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1748 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1750 ice_release_lock(&hw->fl_profs_locks[blk]);
1756 * ice_flow_assoc_prof - associate a VSI with a flow profile
1757 * @hw: pointer to the hardware structure
1758 * @blk: classification stage
1759 * @prof: pointer to flow profile
1760 * @vsi_handle: software VSI handle
1762 * Assumption: the caller has acquired the lock to the profile list
1763 * and the software VSI handle has been validated
/* Associates a VSI with a profile, if not already associated: for ACL
 * first programs the extraction sequence, then adds the HW profile->VSI
 * flow mapping and records the VSI in the profile's bitmap. Failures are
 * logged via ice_debug. No-op (ICE_SUCCESS) when the bit is already set.
 */
1765 static enum ice_status
1766 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1767 struct ice_flow_prof *prof, u16 vsi_handle)
1769 enum ice_status status = ICE_SUCCESS;
1771 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1772 if (blk == ICE_BLK_ACL) {
1773 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1777 status = ice_add_prof_id_flow(hw, blk,
1778 ice_get_hw_vsi_num(hw,
1782 ice_set_bit(vsi_handle, prof->vsis);
1784 ice_debug(hw, ICE_DBG_FLOW,
1785 "HW profile add failed, %d\n",
1793 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1794 * @hw: pointer to the hardware structure
1795 * @blk: classification stage
1796 * @prof: pointer to flow profile
1797 * @vsi_handle: software VSI handle
1799 * Assumption: the caller has acquired the lock to the profile list
1800 * and the software VSI handle has been validated
/* Mirror of ice_flow_assoc_prof: if the VSI bit is set, removes the HW
 * profile->VSI flow mapping and clears the bit; logs on failure.
 * No-op (ICE_SUCCESS) when the VSI was never associated.
 */
1802 static enum ice_status
1803 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1804 struct ice_flow_prof *prof, u16 vsi_handle)
1806 enum ice_status status = ICE_SUCCESS;
1808 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1809 status = ice_rem_prof_id_flow(hw, blk,
1810 ice_get_hw_vsi_num(hw,
1814 ice_clear_bit(vsi_handle, prof->vsis);
1816 ice_debug(hw, ICE_DBG_FLOW,
1817 "HW profile remove failed, %d\n",
1825 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1826 * @hw: pointer to the HW struct
1827 * @blk: classification stage
1828 * @dir: flow direction
1829 * @prof_id: unique ID to identify this flow profile
1830 * @segs: array of one or more packet segments that describe the flow
1831 * @segs_cnt: number of packet segments provided
1832 * @acts: array of default actions
1833 * @acts_cnt: number of default actions
1834 * @prof: stores the returned flow profile added
/* Public entry point: validates arguments and the segment headers, then
 * adds the profile under the per-block lock via ice_flow_add_prof_sync(),
 * linking the new profile into hw->fl_profs[blk] on success.
 */
1837 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1838 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1839 struct ice_flow_action *acts, u8 acts_cnt,
1840 struct ice_flow_prof **prof)
1842 enum ice_status status;
1844 if (segs_cnt > ICE_FLOW_SEG_MAX)
1845 return ICE_ERR_MAX_LIMIT;
1848 return ICE_ERR_PARAM;
1851 return ICE_ERR_BAD_PTR;
1853 status = ice_flow_val_hdrs(segs, segs_cnt);
1857 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1859 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
1860 acts, acts_cnt, prof);
1862 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
1864 ice_release_lock(&hw->fl_profs_locks[blk]);
1870 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1871 * @hw: pointer to the HW struct
1872 * @blk: the block for which the flow profile is to be removed
1873 * @prof_id: unique ID of the flow profile to be removed
/* Public entry point: looks up the profile by ID under the per-block lock
 * and removes it (with all entries) via ice_flow_rem_prof_sync().
 * Returns ICE_ERR_DOES_NOT_EXIST when the ID is unknown.
 */
1876 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1878 struct ice_flow_prof *prof;
1879 enum ice_status status;
1881 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1883 prof = ice_flow_find_prof_id(hw, blk, prof_id);
1885 status = ICE_ERR_DOES_NOT_EXIST;
1889 /* prof becomes invalid after the call */
1890 status = ice_flow_rem_prof_sync(hw, blk, prof);
1893 ice_release_lock(&hw->fl_profs_locks[blk]);
1899 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1900 * @hw: pointer to the HW struct
1901 * @blk: classification stage
1902 * @prof_id: the profile ID handle
1903 * @hw_prof_id: pointer to variable to receive the HW profile ID
/* Translates a software profile ID handle into the HW profile ID via the
 * profile map; writes the result through @hw_prof_id, or returns
 * ICE_ERR_DOES_NOT_EXIST when no mapping is found.
 */
1906 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1909 struct ice_prof_map *map;
1911 map = ice_search_prof_id(hw, blk, prof_id);
1913 *hw_prof_id = map->prof_id;
1917 return ICE_ERR_DOES_NOT_EXIST;
1921 * ice_flow_find_entry - look for a flow entry using its unique ID
1922 * @hw: pointer to the HW struct
1923 * @blk: classification stage
1924 * @entry_id: unique ID to identify this flow entry
1926 * This function looks for the flow entry with the specified unique ID in all
1927 * flow profiles of the specified classification stage. If the entry is found,
1928 * and it returns the handle to the flow entry. Otherwise, it returns
1929 * ICE_FLOW_ENTRY_ID_INVAL.
/* Scans every profile of the block (profile-list lock held) and every
 * entry of each profile (per-profile entry lock held) for the given
 * entry ID. Returns the entry handle, or ICE_FLOW_ENTRY_HANDLE_INVAL.
 */
1931 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
1933 struct ice_flow_entry *found = NULL;
1934 struct ice_flow_prof *p;
1936 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1938 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1939 struct ice_flow_entry *e;
1941 ice_acquire_lock(&p->entries_lock);
1942 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
1943 if (e->id == entry_id) {
1947 ice_release_lock(&p->entries_lock);
1953 ice_release_lock(&hw->fl_profs_locks[blk]);
1955 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
1959 * ice_flow_acl_check_actions - Checks the acl rule's actions
1960 * @hw: pointer to the hardware structure
1961 * @acts: array of actions to be performed on a match
1962 * @acts_cnt: number of actions
1963 * @cnt_alloc: indicates if a ACL counter has been allocated.
/* Validates an ACL rule's action list: rejects oversized lists, duplicate
 * action types (tracked in a bitmap), and — in the elided branch —
 * presumably unsupported types. Then allocates ACL counters for any
 * counter-type actions, storing the first counter index back into the
 * action value (LE16). @cnt_alloc reports whether a counter was taken.
 * NOTE(review): acceptance/rejection of types outside the listed four is
 * elided in this listing; verify against the full source.
 */
1965 static enum ice_status
1966 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
1967 u8 acts_cnt, bool *cnt_alloc)
1969 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
1972 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
1975 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
1976 return ICE_ERR_OUT_OF_RANGE;
1978 for (i = 0; i < acts_cnt; i++) {
1979 if (acts[i].type != ICE_FLOW_ACT_NOP &&
1980 acts[i].type != ICE_FLOW_ACT_DROP &&
1981 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
1982 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
1985 /* If the caller want to add two actions of the same type, then
1986 * it is considered invalid configuration.
1988 if (ice_test_and_set_bit(acts[i].type, dup_check))
1989 return ICE_ERR_PARAM;
1992 /* Checks if ACL counters are needed. */
1993 for (i = 0; i < acts_cnt; i++) {
1994 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1995 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1996 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1997 struct ice_acl_cntrs cntrs;
1998 enum ice_status status;
2001 cntrs.bank = 0; /* Only bank0 for the moment */
2003 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2004 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2006 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2008 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2011 /* Counter index within the bank */
2012 acts[i].data.acl_act.value =
2013 CPU_TO_LE16(cntrs.first_cntr);
2022 * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
2023 * @fld: number of the given field
2024 * @info: info about field
2025 * @range_buf: range checker configuration buffer
2026 * @data: pointer to a data buffer containing flow entry's match values/masks
2027 * @range: Input/output param indicating which range checkers are being used
/* Configures one ACL range checker: derives the 16-bit mask (defaulting to
 * all field bits when no mask offset was given), reads low/high bounds
 * from the entry data, shifts all three by the field's bit displacement,
 * stores them big-endian in the checker config, and marks the checker as
 * used in @range. A zero mask — elided branch — skips the checker.
 * NOTE(review): heavily elided listing; verify the mask==0 path and the
 * unaligned u16 reads from @data against the full source.
 */
2030 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2031 struct ice_aqc_acl_profile_ranges *range_buf,
2032 u8 *data, u8 *range)
2036 /* If not specified, default mask is all bits in field */
2037 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2038 BIT(ice_flds_info[fld].size) - 1 :
2039 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2041 /* If the mask is 0, then we don't need to worry about this input
2042 * range checker value.
2046 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2048 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2049 u8 range_idx = info->entry.val;
2051 range_buf->checker_cfg[range_idx].low_boundary =
2052 CPU_TO_BE16(new_low);
2053 range_buf->checker_cfg[range_idx].high_boundary =
2054 CPU_TO_BE16(new_high);
2055 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2057 /* Indicate which range checker is being used */
2058 *range |= BIT(range_idx);
2063 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
2064 * @fld: number of the given field
2065 * @info: info about the field
2066 * @buf: buffer containing the entry
2067 * @dontcare: buffer containing don't care mask for entry
2068 * @data: pointer to a data buffer containing flow entry's match values/masks
/* Copies one field's match value (and optional mask, inverted into the
 * don't-care buffer) from user data into the ACL entry key, shifting each
 * byte left by the field's sub-byte bit displacement and carrying the
 * overflow bits into the next destination byte. Finally marks the
 * displaced bits at the start of the field and the unused bits at the end
 * of the last byte as don't-care.
 */
2071 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2072 u8 *dontcare, u8 *data)
2074 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2075 bool use_mask = false;
2078 src = info->src.val;
2079 mask = info->src.mask;
2080 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2081 disp = info->xtrct.disp % BITS_PER_BYTE;
2083 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2086 for (k = 0; k < info->entry.last; k++, dst++) {
2087 /* Add overflow bits from previous byte */
2088 buf[dst] = (tmp_s & 0xff00) >> 8;
2090 /* If mask is not valid, tmp_m is always zero, so just setting
2091 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2092 * overflow bits of mask from prev byte
2094 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2096 /* If there is displacement, last byte will only contain
2097 * displaced data, but there is no more data to read from user
2098 * buffer, so skip so as not to potentially read beyond end of
2101 if (!disp || k < info->entry.last - 1) {
2102 /* Store shifted data to use in next byte */
2103 tmp_s = data[src++] << disp;
2105 /* Add current (shifted) byte */
2106 buf[dst] |= tmp_s & 0xff;
2108 /* Handle mask if valid */
/* Mask is inverted: set bits in dontcare = bits NOT matched */
2110 tmp_m = (~data[mask++] & 0xff) << disp;
2111 dontcare[dst] |= tmp_m & 0xff;
2116 /* Fill in don't care bits at beginning of field */
2118 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2119 for (k = 0; k < disp; k++)
2120 dontcare[dst] |= BIT(k);
2123 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2125 /* Fill in don't care bits at end of field */
2127 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2128 info->entry.last - 1;
2129 for (k = end_disp; k < BITS_PER_BYTE; k++)
2130 dontcare[dst] |= BIT(k);
/* NOTE(review): sampled listing — allocation failure checks, goto labels and
 * several declarations between the numbered lines are missing from this view;
 * confirm against the full source.
 */
2135 * ice_flow_acl_frmt_entry - Format acl entry
2136 * @hw: pointer to the hardware structure
2137 * @prof: pointer to flow profile
2138 * @e: pointer to the flow entry
2139 * @data: pointer to a data buffer containing flow entry's match values/masks
2140 * @acts: array of actions to be performed on a match
2141 * @acts_cnt: number of actions
2143 * Formats the key (and key_inverse) to be matched from the data passed in,
2144 * along with data from the flow profile. This key/key_inverse pair makes up
2145 * the 'entry' for an acl flow entry.
2147 static enum ice_status
2148 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2149 struct ice_flow_entry *e, u8 *data,
2150 struct ice_flow_action *acts, u8 acts_cnt)
2152 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2153 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2154 enum ice_status status;
2159 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2163 /* Format the result action */
2165 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2169 status = ICE_ERR_NO_MEMORY;
/* Keep a private copy of the actions on the entry */
2171 e->acts = (struct ice_flow_action *)
2172 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2173 ICE_NONDMA_TO_NONDMA);
2178 e->acts_cnt = acts_cnt;
2180 /* Format the matching data */
2181 buf_sz = prof->cfg.scen->width;
2182 buf = (u8 *)ice_malloc(hw, buf_sz);
2186 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2190 /* 'key' buffer will store both key and key_inverse, so must be twice
2193 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2197 range_buf = (struct ice_aqc_acl_profile_ranges *)
2198 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges))

2202 /* Set don't care mask to all 1's to start, will zero out used bytes */
2203 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Walk every matched field of every segment and format it into buf/dontcare */
2205 for (i = 0; i < prof->segs_cnt; i++) {
2206 struct ice_flow_seg_info *seg = &prof->segs[i];
2207 u64 match = seg->match;
2210 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2211 struct ice_flow_fld_info *info;
2212 const u64 bit = BIT_ULL(j);
2217 info = &seg->fields[j];
/* Range-type fields go to a range checker; others are formatted inline */
2219 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2220 ice_flow_acl_frmt_entry_range(j, info,
2224 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) fields are copied byte-for-byte */
2230 for (j = 0; j < seg->raws_cnt; j++) {
2231 struct ice_flow_fld_info *info = &seg->raws[j].info;
2232 u16 dst, src, mask, k;
2233 bool use_mask = false;
2235 src = info->src.val;
2236 dst = info->entry.val -
2237 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2238 mask = info->src.mask;
2240 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2243 for (k = 0; k < info->entry.last; k++, dst++) {
2244 buf[dst] = data[src++];
2246 dontcare[dst] = ~data[mask++];
/* Stamp the (exact-match) profile ID byte into the scenario */
2253 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2254 dontcare[prof->cfg.scen->pid_idx] = 0;
2256 /* Format the buffer for direction flags */
2257 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2259 if (prof->dir == ICE_FLOW_RX)
2260 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* 'range' is the bitmap of checkers populated above */
2263 buf[prof->cfg.scen->rng_chk_idx] = range;
2264 /* Mark any unused range checkers as don't care */
2265 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
/* Ownership of range_buf transfers to the entry on the used path; the
 * unused path frees it (the branch condition is not visible in this view).
 */
2266 e->range_buf = range_buf;
2268 ice_free(hw, range_buf);
/* Build key + key_inverse from buf/dontcare into the double-size buffer */
2271 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2277 e->entry_sz = buf_sz * 2;
2284 ice_free(hw, dontcare);
/* Error cleanup: release range buffer, action copy, and counters */
2289 if (status && range_buf) {
2290 ice_free(hw, range_buf);
2291 e->range_buf = NULL;
2294 if (status && e->acts) {
2295 ice_free(hw, e->acts);
2300 if (status && cnt_alloc)
2301 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
/* NOTE(review): sampled listing — loop-exit/continue statements between the
 * numbered lines are missing from this view; verify against the full source.
 */
2307 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2308 * the compared data.
2309 * @prof: pointer to flow profile
2310 * @e: pointer to the comparing flow entry
2311 * @do_chg_action: decide if we want to change the ACL action
2312 * @do_add_entry: decide if we want to add the new ACL entry
2313 * @do_rem_entry: decide if we want to remove the current ACL entry
2315 * Find an ACL scenario entry that matches the compared data. In the same time,
2316 * this function also figure out:
2317 * a/ If we want to change the ACL action
2318 * b/ If we want to add the new ACL entry
2319 * c/ If we want to remove the current ACL entry
2321 static struct ice_flow_entry *
2322 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2323 struct ice_flow_entry *e, bool *do_chg_action,
2324 bool *do_add_entry, bool *do_rem_entry)
2326 struct ice_flow_entry *p, *return_entry = NULL;
2330 * a/ There exists an entry with same matching data, but different
2331 * priority, then we remove this existing ACL entry. Then, we
2332 * will add the new entry to the ACL scenario.
2333 * b/ There exists an entry with same matching data, priority, and
2334 * result action, then we do nothing
2335 * c/ There exists an entry with same matching data, priority, but
2336 * different, action, then do only change the action's entry.
2337 * d/ Else, we add this new entry to the ACL scenario.
/* Default outcome when no matching entry is found: plain add */
2339 *do_chg_action = false;
2340 *do_add_entry = true;
2341 *do_rem_entry = false;
2342 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* Entries with different key material are irrelevant */
2343 if (memcmp(p->entry, e->entry, p->entry_sz))
2346 /* From this point, we have the same matching_data. */
2347 *do_add_entry = false;
2350 if (p->priority != e->priority) {
2351 /* matching data && !priority */
2352 *do_add_entry = true;
2353 *do_rem_entry = true;
2357 /* From this point, we will have matching_data && priority */
2358 if (p->acts_cnt != e->acts_cnt)
2359 *do_chg_action = true;
/* Pairwise action comparison: any action of p without an exact twin in e
 * forces an action update.
 */
2360 for (i = 0; i < p->acts_cnt; i++) {
2361 bool found_not_match = false;
2363 for (j = 0; j < e->acts_cnt; j++)
2364 if (memcmp(&p->acts[i], &e->acts[j],
2365 sizeof(struct ice_flow_action))) {
2366 found_not_match = true;
2370 if (found_not_match) {
2371 *do_chg_action = true;
2376 /* (do_chg_action = true) means :
2377 * matching_data && priority && !result_action
2378 * (do_chg_action = false) means :
2379 * matching_data && priority && result_action
2384 return return_entry;
/* NOTE(review): sampled listing — the switch keyword, break statements and
 * the return are not visible here; verify against the full source.
 */
2388 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2391 static enum ice_acl_entry_prior
2392 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2394 enum ice_acl_entry_prior acl_prior;
/* 1:1 mapping of flow priority to ACL entry priority */
2397 case ICE_FLOW_PRIO_LOW:
2398 acl_prior = ICE_LOW;
2400 case ICE_FLOW_PRIO_NORMAL:
2401 acl_prior = ICE_NORMAL;
2403 case ICE_FLOW_PRIO_HIGH:
2404 acl_prior = ICE_HIGH;
/* Presumably the default case: unknown priorities fall back to ICE_NORMAL
 * — TODO confirm against full source.
 */
2407 acl_prior = ICE_NORMAL;
/* NOTE(review): sampled listing — the skip-empty-source check and
 * inner-loop break are not visible here; verify against the full source.
 */
2415 * ice_flow_acl_union_rng_chk - Perform union operation between two
2416 * range-range checker buffers
2417 * @dst_buf: pointer to destination range checker buffer
2418 * @src_buf: pointer to source range checker buffer
2420 * For this function, we do the union between dst_buf and src_buf
2421 * range checker buffer, and we will save the result back to dst_buf
2423 static enum ice_status
2424 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2425 struct ice_aqc_acl_profile_ranges *src_buf)
2429 if (!dst_buf || !src_buf)
2430 return ICE_ERR_BAD_PTR;
2432 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2433 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2434 bool will_populate = false;
2436 in_data = &src_buf->checker_cfg[i];
/* Find a destination slot that is either free (mask == 0) or already
 * holds an identical configuration (dedup).
 */
2441 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2442 cfg_data = &dst_buf->checker_cfg[j];
2444 if (!cfg_data->mask ||
2445 !memcmp(cfg_data, in_data,
2446 sizeof(struct ice_acl_rng_data))) {
2447 will_populate = true;
2452 if (will_populate) {
2453 ice_memcpy(cfg_data, in_data,
2454 sizeof(struct ice_acl_rng_data),
2455 ICE_NONDMA_TO_NONDMA);
2457 /* No available slot left to program range checker */
2458 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): sampled listing — error checks, goto labels and several
 * branch headers between the numbered lines are missing from this view;
 * verify against the full source before modifying.
 */
2466 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2467 * @hw: pointer to the hardware structure
2468 * @prof: pointer to flow profile
2469 * @entry: double pointer to the flow entry
2471 * For this function, we will look at the current added entries in the
2472 * corresponding ACL scenario. Then, we will perform matching logic to
2473 * see if we want to add/modify/do nothing with this new entry.
2475 static enum ice_status
2476 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2477 struct ice_flow_entry **entry)
2479 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2480 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2481 struct ice_acl_act_entry *acts = NULL;
2482 struct ice_flow_entry *exist;
2483 enum ice_status status = ICE_SUCCESS;
2484 struct ice_flow_entry *e;
2487 if (!entry || !(*entry) || !prof)
2488 return ICE_ERR_BAD_PTR;
2492 do_chg_rng_chk = false;
2496 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2501 /* Query the current range-checker value in FW */
2502 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2506 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2507 sizeof(struct ice_aqc_acl_profile_ranges),
2508 ICE_NONDMA_TO_NONDMA);
2510 /* Generate the new range-checker value */
2511 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2515 /* Reconfigure the range check if the buffer is changed. */
2516 do_chg_rng_chk = false;
2517 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2518 sizeof(struct ice_aqc_acl_profile_ranges))) {
2519 status = ice_prog_acl_prof_ranges(hw, prof_id,
2520 &cfg_rng_buf, NULL);
2524 do_chg_rng_chk = true;
2528 /* Figure out if we want to (change the ACL action) and/or
2529 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2531 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2532 &do_add_entry, &do_rem_entry);
/* Remove the stale duplicate (same key, different priority) first */
2535 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2540 /* Prepare the result action buffer */
2541 acts = (struct ice_acl_act_entry *)ice_calloc
2542 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2543 for (i = 0; i < e->acts_cnt; i++)
2544 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2545 sizeof(struct ice_acl_act_entry),
2546 ICE_NONDMA_TO_NONDMA);
2549 enum ice_acl_entry_prior prior;
/* 'entry' buffer holds key followed by key_inverse; split at midpoint */
2553 keys = (u8 *)e->entry;
2554 inverts = keys + (e->entry_sz / 2);
2555 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2557 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2558 inverts, acts, e->acts_cnt,
2563 e->scen_entry_idx = entry_idx;
2564 LIST_ADD(&e->l_entry, &prof->entries);
2566 if (do_chg_action) {
2567 /* For the action memory info, update the SW's copy of
2568 * exist entry with e's action memory info
2570 ice_free(hw, exist->acts);
2571 exist->acts_cnt = e->acts_cnt;
2572 exist->acts = (struct ice_flow_action *)
2573 ice_calloc(hw, exist->acts_cnt,
2574 sizeof(struct ice_flow_action));
2577 status = ICE_ERR_NO_MEMORY;
2581 ice_memcpy(exist->acts, e->acts,
2582 sizeof(struct ice_flow_action) * e->acts_cnt,
2583 ICE_NONDMA_TO_NONDMA);
/* Reprogram the action memory of the already-present HW entry */
2585 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2587 exist->scen_entry_idx);
2592 if (do_chg_rng_chk) {
2593 /* In this case, we want to update the range checker
2594 * information of the exist entry
2596 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2602 /* As we don't add the new entry to our SW DB, deallocate its
2603 * memories, and return the exist entry to the caller
2605 ice_dealloc_flow_entry(hw, e);
/* NOTE(review): sampled listing — closing brace and return are outside this
 * view; verify against the full source.
 */
2616 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2617 * @hw: pointer to the hardware structure
2618 * @prof: pointer to flow profile
2619 * @e: double pointer to the flow entry
2621 static enum ice_status
2622 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2623 struct ice_flow_entry **e)
2625 enum ice_status status;
/* Thin locking wrapper: serialize scenario updates on the profile's
 * entries lock around the _sync worker.
 */
2627 ice_acquire_lock(&prof->entries_lock);
2628 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2629 ice_release_lock(&prof->entries_lock);
/* NOTE(review): sampled listing — per-block switch, error labels, and
 * several intermediate lines are absent from this view; verify against the
 * full source before modifying.
 */
2635 * ice_flow_add_entry - Add a flow entry
2636 * @hw: pointer to the HW struct
2637 * @blk: classification stage
2638 * @prof_id: ID of the profile to add a new flow entry to
2639 * @entry_id: unique ID to identify this flow entry
2640 * @vsi_handle: software VSI handle for the flow entry
2641 * @prio: priority of the flow entry
2642 * @data: pointer to a data buffer containing flow entry's match values/masks
2643 * @acts: arrays of actions to be performed on a match
2644 * @acts_cnt: number of actions
2645 * @entry_h: pointer to buffer that receives the new flow entry's handle
2648 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2649 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2650 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2653 struct ice_flow_entry *e = NULL;
2654 struct ice_flow_prof *prof;
2655 enum ice_status status = ICE_SUCCESS;
2657 /* ACL entries must indicate an action */
2658 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2659 return ICE_ERR_PARAM;
2661 /* No flow entry data is expected for RSS */
2662 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2663 return ICE_ERR_BAD_PTR;
2665 if (!ice_is_vsi_valid(hw, vsi_handle))
2666 return ICE_ERR_PARAM;
2668 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2670 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2672 status = ICE_ERR_DOES_NOT_EXIST;
2674 /* Allocate memory for the entry being added and associate
2675 * the VSI to the found flow profile
2677 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2679 status = ICE_ERR_NO_MEMORY;
2681 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2684 ice_release_lock(&hw->fl_profs_locks[blk]);
2689 e->vsi_handle = vsi_handle;
2698 /* ACL will handle the entry management */
2699 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2704 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Presumably the default case of a per-block switch — TODO confirm */
2712 status = ICE_ERR_NOT_IMPL;
2716 if (blk != ICE_BLK_ACL) {
2717 /* ACL will handle the entry management */
2718 ice_acquire_lock(&prof->entries_lock);
2719 LIST_ADD(&e->l_entry, &prof->entries);
2720 ice_release_lock(&prof->entries_lock);
/* Hand the opaque entry handle back to the caller */
2723 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2728 ice_free(hw, e->entry);
/* NOTE(review): sampled listing — the prof assignment and return are not
 * visible in this view; verify against the full source.
 */
2736 * ice_flow_rem_entry - Remove a flow entry
2737 * @hw: pointer to the HW struct
2738 * @blk: classification stage
2739 * @entry_h: handle to the flow entry to be removed
2741 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2744 struct ice_flow_entry *entry;
2745 struct ice_flow_prof *prof;
2746 enum ice_status status = ICE_SUCCESS;
2748 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2749 return ICE_ERR_PARAM;
/* Convert the opaque handle back into an entry pointer */
2751 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2753 /* Retain the pointer to the flow profile as the entry will be freed */
2757 ice_acquire_lock(&prof->entries_lock);
2758 status = ice_flow_rem_entry_sync(hw, blk, entry);
2759 ice_release_lock(&prof->entries_lock);
/* NOTE(review): sampled listing — the body of the RANGE special case at 2796
 * is not visible here; verify against the full source.
 */
2766 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2767 * @seg: packet segment the field being set belongs to
2768 * @fld: field to be set
2769 * @field_type: type of the field
2770 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2771 * entry's input buffer
2772 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2774 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2775 * entry's input buffer
2777 * This helper function stores information of a field being matched, including
2778 * the type of the field and the locations of the value to match, the mask, and
2779 * and the upper-bound value in the start of the input buffer for a flow entry.
2780 * This function should only be used for fixed-size data structures.
2782 * This function also opportunistically determines the protocol headers to be
2783 * present based on the fields being set. Some fields cannot be used alone to
2784 * determine the protocol headers present. Sometimes, fields for particular
2785 * protocol headers are not matched. In those cases, the protocol headers
2786 * must be explicitly set.
2789 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2790 enum ice_flow_fld_match_type field_type, u16 val_loc,
2791 u16 mask_loc, u16 last_loc)
2793 u64 bit = BIT_ULL(fld);
2796 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record match type plus the byte offsets of value/mask/last within the
 * caller's entry input buffer.
 */
2799 seg->fields[fld].type = field_type;
2800 seg->fields[fld].src.val = val_loc;
2801 seg->fields[fld].src.mask = mask_loc;
2802 seg->fields[fld].src.last = last_loc;
/* Implicitly mark the owning protocol header as present in this segment */
2804 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2808 * ice_flow_set_fld - specifies locations of field from entry's input buffer
2809 * @seg: packet segment the field being set belongs to
2810 * @fld: field to be set
2811 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2812 * entry's input buffer
2813 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2815 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2816 * entry's input buffer
2817 * @range: indicate if field being matched is to be in a range
2819 * This function specifies the locations, in the form of byte offsets from the
2820 * start of the input buffer for a flow entry, from where the value to match,
2821 * the mask value, and upper value can be extracted. These locations are then
2822 * stored in the flow profile. When adding a flow entry associated with the
2823 * flow profile, these locations will be used to quickly extract the values and
2824 * create the content of a match entry. This function should only be used for
2825 * fixed-size data structures.
2828 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2829 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Map the boolean 'range' flag onto the extended match-type enum */
2831 enum ice_flow_fld_match_type t = range ?
2832 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
2834 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
2838 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
2839 * @seg: packet segment the field being set belongs to
2840 * @fld: field to be set
2841 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2842 * entry's input buffer
2843 * @pref_loc: location of prefix value from entry's input buffer
2844 * @pref_sz: size of the location holding the prefix value
2846 * This function specifies the locations, in the form of byte offsets from the
2847 * start of the input buffer for a flow entry, from where the value to match
2848 * and the IPv4 prefix value can be extracted. These locations are then stored
2849 * in the flow profile. When adding flow entries to the associated flow profile,
2850 * these locations can be used to quickly extract the values to create the
2851 * content of a match entry. This function should only be used for fixed-size
2855 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2856 u16 val_loc, u16 pref_loc, u8 pref_sz)
2858 /* For this type of field, the "mask" location is for the prefix value's
2859 * location and the "last" location is for the size of the location of
/* Repurposes the ext helper's mask/last slots for prefix offset and size */
2862 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
2863 pref_loc, (u16)pref_sz);
/* NOTE(review): sampled listing — the raws_cnt increment after the guarded
 * block is not visible here; verify against the full source.
 */
2867 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
2868 * @seg: packet segment the field being set belongs to
2869 * @off: offset of the raw field from the beginning of the segment in bytes
2870 * @len: length of the raw pattern to be matched
2871 * @val_loc: location of the value to match from entry's input buffer
2872 * @mask_loc: location of mask value from entry's input buffer
2874 * This function specifies the offset of the raw field to be match from the
2875 * beginning of the specified packet segment, and the locations, in the form of
2876 * byte offsets from the start of the input buffer for a flow entry, from where
2877 * the value to match and the mask value to be extracted. These locations are
2878 * then stored in the flow profile. When adding flow entries to the associated
2879 * flow profile, these locations can be used to quickly extract the values to
2880 * create the content of a match entry. This function should only be used for
2881 * fixed-size data structures.
2884 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
2885 u16 val_loc, u16 mask_loc)
/* Only record the raw field while there is room in the fixed array */
2887 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
2888 seg->raws[seg->raws_cnt].off = off;
2889 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
2890 seg->raws[seg->raws_cnt].info.src.val = val_loc;
2891 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
2892 /* The "last" field is used to store the length of the field */
2893 seg->raws[seg->raws_cnt].info.src.last = len;
2896 /* Overflows of "raws" will be handled as an error condition later in
2897 * the flow when this information is processed.
/* Bitmasks grouping the segment-header flags that RSS configuration accepts,
 * by protocol layer; VAL_MASKS is their union used for validation.
 */
2902 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
2903 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
2905 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
2906 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
2908 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
2909 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
2910 ICE_FLOW_SEG_HDR_SCTP)
2912 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
2913 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
2914 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
2915 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
/* NOTE(review): sampled listing — the bit-test/clear inside the loop and the
 * final return are not visible here; verify against the full source.
 */
2918 * ice_flow_set_rss_seg_info - setup packet segments for RSS
2919 * @segs: pointer to the flow field segment(s)
2920 * @hash_fields: fields to be hashed on for the segment(s)
2921 * @flow_hdr: protocol header fields within a packet segment
2923 * Helper function to extract fields from hash bitmap and use flow
2924 * header value to set flow field segment for further use in flow
2925 * profile entry or removal.
2927 static enum ice_status
2928 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
2931 u64 val = hash_fields;
/* Register each hashed field; offsets are INVAL because RSS provides no
 * per-entry match data.
 */
2934 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
2935 u64 bit = BIT_ULL(i);
2938 ice_flow_set_fld(segs, (enum ice_flow_field)i,
2939 ICE_FLOW_FLD_OFF_INVAL,
2940 ICE_FLOW_FLD_OFF_INVAL,
2941 ICE_FLOW_FLD_OFF_INVAL, false);
2945 ICE_FLOW_SET_HDRS(segs, flow_hdr);
/* Reject any header flag outside the RSS-supported set */
2947 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
2948 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
2949 return ICE_ERR_PARAM;
/* At most one L3 and one L4 header may be selected (power-of-2 check) */
2951 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
2952 if (val && !ice_is_pow2(val))
2955 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
2956 if (val && !ice_is_pow2(val))
/* NOTE(review): sampled listing — the ice_free of the removed node is not
 * visible here; verify against the full source.
 */
2963 * ice_rem_vsi_rss_list - remove VSI from RSS list
2964 * @hw: pointer to the hardware structure
2965 * @vsi_handle: software VSI handle
2967 * Remove the VSI from all RSS configurations in the list.
2969 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
2971 struct ice_rss_cfg *r, *tmp;
2973 if (LIST_EMPTY(&hw->rss_list_head))
2976 ice_acquire_lock(&hw->rss_locks);
2977 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
2978 ice_rss_cfg, l_entry) {
/* Drop this VSI; delete the config once no VSI references it */
2979 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
2980 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
2981 LIST_DEL(&r->l_entry);
2985 ice_release_lock(&hw->rss_locks);
/* NOTE(review): sampled listing — break-on-error statements between the
 * numbered lines are not visible; verify against the full source.
 */
2989 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
2990 * @hw: pointer to the hardware structure
2991 * @vsi_handle: software VSI handle
2993 * This function will iterate through all flow profiles and disassociate
2994 * the VSI from that profile. If the flow profile has no VSIs it will
2997 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
2999 const enum ice_block blk = ICE_BLK_RSS;
3000 struct ice_flow_prof *p, *t;
3001 enum ice_status status = ICE_SUCCESS;
3003 if (!ice_is_vsi_valid(hw, vsi_handle))
3004 return ICE_ERR_PARAM;
3006 if (LIST_EMPTY(&hw->fl_profs[blk]))
3009 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3010 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3012 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3013 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Remove the profile entirely once its last VSI is gone */
3017 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3018 status = ice_flow_rem_prof_sync(hw, blk, p);
3024 ice_release_lock(&hw->fl_profs_locks[blk]);
/* NOTE(review): sampled listing — the ice_free of the removed node is not
 * visible here; verify against the full source.
 */
3030 * ice_rem_rss_list - remove RSS configuration from list
3031 * @hw: pointer to the hardware structure
3032 * @vsi_handle: software VSI handle
3033 * @prof: pointer to flow profile
3035 * Assumption: lock has already been acquired for RSS list
3038 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3040 struct ice_rss_cfg *r, *tmp;
3042 /* Search for RSS hash fields associated to the VSI that match the
3043 * hash configurations associated to the flow profile. If found
3044 * remove from the RSS entry list of the VSI context and delete entry.
3046 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3047 ice_rss_cfg, l_entry) {
/* A config matches when hashed fields and header bitmap both equal the
 * profile's last segment.
 */
3048 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3049 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3050 ice_clear_bit(vsi_handle, r->vsis);
3051 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3052 LIST_DEL(&r->l_entry);
/* NOTE(review): sampled listing — returns inside the loop and at the end are
 * not visible here; verify against the full source.
 */
3061 * ice_add_rss_list - add RSS configuration to list
3062 * @hw: pointer to the hardware structure
3063 * @vsi_handle: software VSI handle
3064 * @prof: pointer to flow profile
3066 * Assumption: lock has already been acquired for RSS list
3068 static enum ice_status
3069 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3071 struct ice_rss_cfg *r, *rss_cfg;
/* If an identical config already exists, just mark this VSI in it */
3073 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3074 ice_rss_cfg, l_entry)
3075 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3076 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3077 ice_set_bit(vsi_handle, r->vsis);
/* Otherwise allocate a new config node from the profile's last segment */
3081 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3083 return ICE_ERR_NO_MEMORY;
3085 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3086 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3087 rss_cfg->symm = prof->cfg.symm;
3088 ice_set_bit(vsi_handle, rss_cfg->vsis);
3090 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Shift/mask pairs carving a 64-bit RSS flow-profile ID into hash (bits
 * 0-31), protocol header (bits 32-62) and encapsulation flag (bit 63).
 */
3095 #define ICE_FLOW_PROF_HASH_S 0
3096 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3097 #define ICE_FLOW_PROF_HDR_S 32
3098 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3099 #define ICE_FLOW_PROF_ENCAP_S 63
3100 #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
/* Segment counts used for non-tunneled vs tunneled RSS configuration */
3102 #define ICE_RSS_OUTER_HEADERS 1
3103 #define ICE_RSS_INNER_HEADERS 2
3105 /* Flow profile ID format:
3106 * [0:31] - Packet match fields
3107 * [32:62] - Protocol header
3108 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
/* segs_cnt > 1 implies an inner (tunneled) segment, setting the encap bit */
3110 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3111 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3112 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3113 ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
/* NOTE(review): sampled listing — return type, opening brace and the 'reg'
 * declaration are not visible here; verify against the full source.
 */
3116 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
/* Each GLQF_HSYMM register packs four byte-wide symmetry entries; locate
 * the register index and the byte lane for 'src', then program 'dst' with
 * the enable bit (0x80) set.
 */
3118 u32 s = ((src % 4) << 3); /* byte shift */
3119 u32 v = dst | 0x80; /* value to program */
3120 u8 i = src / 4; /* register index */
/* Read-modify-write of the selected byte lane only */
3123 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3124 reg = (reg & ~(0xff << s)) | (v << s);
3125 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* NOTE(review): sampled listing — return type and local declarations are not
 * visible here; verify against the full source.
 */
3129 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3132 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
/* Program src<->dst symmetry word-by-word, in both directions, so the hash
 * of (a,b) equals the hash of (b,a).
 */
3135 for (i = 0; i < len; i++) {
3136 ice_rss_config_xor_word(hw, prof_id,
3137 /* Yes, field vector in GLQF_HSYMM and
3138 * GLQF_HINSET is inversed!
/* Indices are mirrored from the end of the field vector (see note above) */
3140 fv_last_word - (src + i),
3141 fv_last_word - (dst + i));
3142 ice_rss_config_xor_word(hw, prof_id,
3143 fv_last_word - (dst + i),
3144 fv_last_word - (src + i));
/* NOTE(review): sampled listing — a NULL check on 'map' and several comment
 * lines are not visible here; verify against the full source. Note the word
 * counts passed to ice_rss_config_xor: 2 words for IPv4 addresses, 8 for
 * IPv6, 1 for each L4 port pair.
 */
3149 ice_rss_update_symm(struct ice_hw *hw,
3150 struct ice_flow_prof *prof)
3152 struct ice_prof_map *map;
3155 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3156 prof_id = map->prof_id;
3158 /* clear to default */
3159 for (m = 0; m < 6; m++)
3160 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3161 if (prof->cfg.symm) {
/* Work on the innermost segment of the profile */
3162 struct ice_flow_seg_info *seg =
3163 &prof->segs[prof->segs_cnt - 1];
3165 struct ice_flow_seg_xtrct *ipv4_src =
3166 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3167 struct ice_flow_seg_xtrct *ipv4_dst =
3168 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3169 struct ice_flow_seg_xtrct *ipv6_src =
3170 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3171 struct ice_flow_seg_xtrct *ipv6_dst =
3172 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3174 struct ice_flow_seg_xtrct *tcp_src =
3175 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3176 struct ice_flow_seg_xtrct *tcp_dst =
3177 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3179 struct ice_flow_seg_xtrct *udp_src =
3180 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3181 struct ice_flow_seg_xtrct *udp_dst =
3182 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3184 struct ice_flow_seg_xtrct *sctp_src =
3185 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3186 struct ice_flow_seg_xtrct *sctp_dst =
3187 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* Only XOR pairs whose src and dst were both extracted (prot_id != 0) */
3190 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3191 ice_rss_config_xor(hw, prof_id,
3192 ipv4_src->idx, ipv4_dst->idx, 2);
3195 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3196 ice_rss_config_xor(hw, prof_id,
3197 ipv6_src->idx, ipv6_dst->idx, 8);
3200 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3201 ice_rss_config_xor(hw, prof_id,
3202 tcp_src->idx, tcp_dst->idx, 1);
3205 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3206 ice_rss_config_xor(hw, prof_id,
3207 udp_src->idx, udp_dst->idx, 1);
3210 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3211 ice_rss_config_xor(hw, prof_id,
3212 sctp_src->idx, sctp_dst->idx, 1);
/* NOTE(review): sampled listing — goto labels, error checks and the final
 * free/return are not visible here; verify against the full source before
 * modifying.
 */
3217 * ice_add_rss_cfg_sync - add an RSS configuration
3218 * @hw: pointer to the hardware structure
3219 * @vsi_handle: software VSI handle
3220 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3221 * @addl_hdrs: protocol header fields
3222 * @segs_cnt: packet segment count
3223 * @symm: symmetric hash enable/disable
3225 * Assumption: lock has already been acquired for RSS list
3227 static enum ice_status
3228 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3229 u32 addl_hdrs, u8 segs_cnt, bool symm)
3231 const enum ice_block blk = ICE_BLK_RSS;
3232 struct ice_flow_prof *prof = NULL;
3233 struct ice_flow_seg_info *segs;
3234 enum ice_status status;
3236 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3237 return ICE_ERR_PARAM;
3239 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3242 return ICE_ERR_NO_MEMORY;
3244 /* Construct the packet segment info from the hashed fields */
3245 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3250 /* Search for a flow profile that has matching headers, hash fields
3251 * and has the input VSI associated to it. If found, no further
3252 * operations required and exit.
3254 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3256 ICE_FLOW_FIND_PROF_CHK_FLDS |
3257 ICE_FLOW_FIND_PROF_CHK_VSI);
/* Exact match: only the symmetric setting may need a refresh */
3259 if (prof->cfg.symm == symm)
3261 prof->cfg.symm = symm;
3265 /* Check if a flow profile exists with the same protocol headers and
3266 * associated with the input VSI. If so disasscociate the VSI from
3267 * this profile. The VSI will be added to a new profile created with
3268 * the protocol header and new hash field configuration.
3270 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3271 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3273 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3275 ice_rem_rss_list(hw, vsi_handle, prof);
3279 /* Remove profile if it has no VSIs associated */
3280 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3281 status = ice_flow_rem_prof(hw, blk, prof->id);
3287 /* Search for a profile that has same match fields only. If this
3288 * exists then associate the VSI to this profile.
3290 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3292 ICE_FLOW_FIND_PROF_CHK_FLDS);
3294 if (prof->cfg.symm == symm) {
3295 status = ice_flow_assoc_prof(hw, blk, prof,
3298 status = ice_add_rss_list(hw, vsi_handle,
3301 /* if a profile exist but with different symmetric
3302 * requirement, just return error.
3304 status = ICE_ERR_NOT_SUPPORTED;
3309 /* Create a new flow profile with generated profile and packet
3310 * segment information.
3312 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3313 ICE_FLOW_GEN_PROFID(hashed_flds,
3314 segs[segs_cnt - 1].hdrs,
3316 segs, segs_cnt, NULL, 0, &prof);
3320 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3321 /* If association to a new flow profile failed then this profile can
/* Roll back the freshly created profile on association failure */
3325 ice_flow_rem_prof(hw, blk, prof->id);
3329 status = ice_add_rss_list(hw, vsi_handle, prof);
3331 prof->cfg.symm = symm;
/* Push the symmetric-hash XOR configuration to hardware */
3334 ice_rss_update_symm(hw, prof);
3342 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3343 * @hw: pointer to the hardware structure
3344 * @vsi_handle: software VSI handle
3345 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3346 * @addl_hdrs: protocol header fields
3347 * @symm: symmetric hash enable/disable
3349 * This function will generate a flow profile based on fields associated with
3350 * the input fields to hash on, the flow type and use the VSI number to add
3351 * a flow entry to the profile.
/* NOTE(review): the return-type line preceding the function name is not
 * visible in this view (embedded numbering jumps 3351->3354); presumably
 * "enum ice_status" — confirm against the full file.
 */
3354 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3355 u32 addl_hdrs, bool symm)
3357 enum ice_status status;
/* Reject an empty hash request or an out-of-range VSI handle. */
3359 if (hashed_flds == ICE_HASH_INVALID ||
3360 !ice_is_vsi_valid(hw, vsi_handle))
3361 return ICE_ERR_PARAM;
/* Serialize against other RSS list/profile updates, then configure the
 * outer-header segment followed by the inner-header segment.
 * NOTE(review): the condition guarding the second _sync call (and the
 * final "return status;") sit on lines not visible here.
 */
3363 ice_acquire_lock(&hw->rss_locks);
3364 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3365 ICE_RSS_OUTER_HEADERS, symm);
3367 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3368 addl_hdrs, ICE_RSS_INNER_HEADERS,
3370 ice_release_lock(&hw->rss_locks);
3376 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3377 * @hw: pointer to the hardware structure
3378 * @vsi_handle: software VSI handle
3379 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3380 * @addl_hdrs: Protocol header fields within a packet segment
3381 * @segs_cnt: packet segment count
3383 * Assumption: lock has already been acquired for RSS list
3385 static enum ice_status
3386 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3387 u32 addl_hdrs, u8 segs_cnt)
3389 const enum ice_block blk = ICE_BLK_RSS;
3390 struct ice_flow_seg_info *segs;
3391 struct ice_flow_prof *prof;
3392 enum ice_status status;
/* Zeroed segment array; only the last (innermost) segment is filled in.
 * NOTE(review): the "if (!segs)" guard between the calloc and the error
 * return is on a line not visible in this view — confirm it exists.
 */
3394 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3397 return ICE_ERR_NO_MEMORY;
3399 /* Construct the packet segment info from the hashed fields */
3400 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
/* Look up the profile whose headers AND hash fields match the request;
 * removal only makes sense if such a profile exists.
 */
3405 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3407 ICE_FLOW_FIND_PROF_CHK_FLDS);
3409 status = ICE_ERR_DOES_NOT_EXIST;
3413 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3417 /* Remove RSS configuration from VSI context before deleting
3420 ice_rem_rss_list(hw, vsi_handle, prof);
/* Last user gone: tear the profile itself out of the block.
 * NOTE(review): the cleanup label that frees "segs" is on lines not
 * visible here — confirm the allocation is released on all paths.
 */
3422 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3423 status = ice_flow_rem_prof(hw, blk, prof->id);
3431 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3432 * @hw: pointer to the hardware structure
3433 * @vsi_handle: software VSI handle
3434 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3435 * @addl_hdrs: Protocol header fields within a packet segment
3437 * This function will lookup the flow profile based on the input
3438 * hash field bitmap, iterate through the profile entry list of
3439 * that profile and find entry associated with input VSI to be
3440 * removed. Calls are made to underlying flow apis which will in
3441 * turn build or update buffers for RSS XLT1 section.
/* NOTE(review): the return-type line preceding the function name is not
 * visible here (numbering jumps 3441->3444); presumably "enum ice_status".
 */
3444 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3447 enum ice_status status;
/* Reject an empty hash request or an out-of-range VSI handle. */
3449 if (hashed_flds == ICE_HASH_INVALID ||
3450 !ice_is_vsi_valid(hw, vsi_handle))
3451 return ICE_ERR_PARAM;
/* Mirror of ice_add_rss_cfg: remove the outer-header config, then the
 * inner-header config, under the RSS lock.
 * NOTE(review): the condition guarding the second _sync call and the final
 * "return status;" sit on lines not visible in this view.
 */
3453 ice_acquire_lock(&hw->rss_locks);
3454 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3455 ICE_RSS_OUTER_HEADERS);
3457 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3458 addl_hdrs, ICE_RSS_INNER_HEADERS);
3459 ice_release_lock(&hw->rss_locks);
3465 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3466 * @hw: pointer to the hardware structure
3467 * @vsi_handle: software VSI handle
3469 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3471 enum ice_status status = ICE_SUCCESS;
3472 struct ice_rss_cfg *r;
3474 if (!ice_is_vsi_valid(hw, vsi_handle))
3475 return ICE_ERR_PARAM;
/* Walk the saved RSS config list and re-apply every entry that names this
 * VSI, outer headers first then inner headers, under the RSS lock.
 * NOTE(review): several argument lines of the two _sync calls (the hash
 * fields / header arguments drawn from "r") and the loop's error-break
 * handling are on lines not visible in this view — confirm upstream.
 */
3477 ice_acquire_lock(&hw->rss_locks);
3478 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3479 ice_rss_cfg, l_entry) {
3480 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3481 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3484 ICE_RSS_OUTER_HEADERS,
3488 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3491 ICE_RSS_INNER_HEADERS,
3497 ice_release_lock(&hw->rss_locks);
3503 * ice_get_rss_cfg - returns hashed fields for the given header types
3504 * @hw: pointer to the hardware structure
3505 * @vsi_handle: software VSI handle
3506 * @hdrs: protocol header type
3508 * This function will return the match fields of the first instance of flow
3509 * profile having the given header types and containing input VSI
3511 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3513 struct ice_rss_cfg *r, *rss_cfg = NULL;
3515 /* verify if the protocol header is non zero and VSI is valid */
3516 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3517 return ICE_HASH_INVALID;
/* Scan the saved RSS configs for the first entry that both names this VSI
 * and matches the requested header combination exactly.
 * NOTE(review): the loop-body lines that record the match (presumably
 * "rss_cfg = r;" and a break) are on lines not visible in this view.
 */
3519 ice_acquire_lock(&hw->rss_locks);
3520 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3521 ice_rss_cfg, l_entry)
3522 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3523 r->packet_hdr == hdrs) {
3527 ice_release_lock(&hw->rss_locks);
/* No match: ICE_HASH_INVALID signals "no RSS config for these headers". */
3529 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;