/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */
5 #include "ice_common.h"
/* Size of known protocol header fields, in bytes.  Each constant is consumed
 * as the _size_bytes argument of the ICE_FLOW_FLD_INFO*() macros below, which
 * convert it to a width in bits.
 */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2
10 #define ICE_FLOW_FLD_SZ_VLAN 2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
/* Leading 32-, 48- and 64-bit portions of an IPv6 address, for matching on
 * an address prefix rather than the full 128 bits.
 */
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP 1
17 #define ICE_FLOW_FLD_SZ_IP_TTL 1
18 #define ICE_FLOW_FLD_SZ_IP_PROT 1
19 #define ICE_FLOW_FLD_SZ_PORT 2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER 2
/* Tunnel / overlay protocol fields */
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID 4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI 2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI 4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36 enum ice_flow_seg_hdr hdr;
37 s16 off; /* Offset from start of a protocol header, in bits */
38 u16 size; /* Size of fields in bits */
39 u16 mask; /* 16-bit mask for field */
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
44 .off = (_offset_bytes) * BITS_PER_BYTE, \
45 .size = (_size_bytes) * BITS_PER_BYTE, \
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
51 .off = (_offset_bytes) * BITS_PER_BYTE, \
52 .size = (_size_bytes) * BITS_PER_BYTE, \
56 /* Table containing properties of supported protocol header fields */
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
60 /* ICE_FLOW_FIELD_IDX_ETH_DA */
61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62 /* ICE_FLOW_FIELD_IDX_ETH_SA */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64 /* ICE_FLOW_FIELD_IDX_S_VLAN */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66 /* ICE_FLOW_FIELD_IDX_C_VLAN */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
71 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
74 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
77 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
116 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
131 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139 /* ICE_FLOW_FIELD_IDX_ARP_OP */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
142 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
147 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
150 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152 ICE_FLOW_FLD_SZ_GTP_TEID),
153 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155 ICE_FLOW_FLD_SZ_GTP_TEID),
156 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158 ICE_FLOW_FLD_SZ_GTP_TEID),
159 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164 ICE_FLOW_FLD_SZ_GTP_TEID),
165 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167 ICE_FLOW_FLD_SZ_GTP_TEID),
169 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
173 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175 ICE_FLOW_FLD_SZ_PFCP_SEID),
177 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
181 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183 ICE_FLOW_FLD_SZ_ESP_SPI),
185 /* ICE_FLOW_FIELD_IDX_AH_SPI */
186 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187 ICE_FLOW_FLD_SZ_AH_SPI),
189 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
/* Bitmaps indicating relevant packet types for a particular protocol header.
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
198 static const u32 ice_ptypes_mac_ofos[] = {
199 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
203 0x00000000, 0x00000000, 0x00000000, 0x00000000,
204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213 0x00000000, 0x00000000, 0x00000000, 0x00000000,
214 0x00000000, 0x00000000, 0x00000000, 0x00000000,
215 0x00000000, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 /* Packet types for packets with an Outer/First/Single IPv4 header */
222 static const u32 ice_ptypes_ipv4_ofos[] = {
223 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
224 0x00000000, 0x00000155, 0x00000000, 0x00000000,
225 0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 /* Packet types for packets with an Innermost/Last IPv4 header */
234 static const u32 ice_ptypes_ipv4_il[] = {
235 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
236 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x00000000, 0x00000000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 /* Packet types for packets with an Outer/First/Single IPv6 header */
246 static const u32 ice_ptypes_ipv6_ofos[] = {
247 0x00000000, 0x00000000, 0x77000000, 0x10002000,
248 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
249 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 /* Packet types for packets with an Innermost/Last IPv6 header */
258 static const u32 ice_ptypes_ipv6_il[] = {
259 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
260 0x00000770, 0x00000000, 0x00000000, 0x00000000,
261 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
262 0x00000000, 0x00000000, 0x00000000, 0x00000000,
263 0x00000000, 0x00000000, 0x00000000, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 /* Packet types for packets with an Outermost/First ARP header */
270 static const u32 ice_ptypes_arp_of[] = {
271 0x00000800, 0x00000000, 0x00000000, 0x00000000,
272 0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 0x00000000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
284 static const u32 ice_ptypes_udp_il[] = {
285 0x81000000, 0x20204040, 0x04000010, 0x80810102,
286 0x00000040, 0x00000000, 0x00000000, 0x00000000,
287 0x00000000, 0x00410000, 0x90842000, 0x00000007,
288 0x00000000, 0x00000000, 0x00000000, 0x00000000,
289 0x00000000, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 /* Packet types for packets with an Innermost/Last TCP header */
296 static const u32 ice_ptypes_tcp_il[] = {
297 0x04000000, 0x80810102, 0x10000040, 0x02040408,
298 0x00000102, 0x00000000, 0x00000000, 0x00000000,
299 0x00000000, 0x00820000, 0x21084000, 0x00000000,
300 0x00000000, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 /* Packet types for packets with an Innermost/Last SCTP header */
308 static const u32 ice_ptypes_sctp_il[] = {
309 0x08000000, 0x01020204, 0x20000081, 0x04080810,
310 0x00000204, 0x00000000, 0x00000000, 0x00000000,
311 0x00000000, 0x01040000, 0x00000000, 0x00000000,
312 0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 0x00000000, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x00000000, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 /* Packet types for packets with an Outermost/First ICMP header */
320 static const u32 ice_ptypes_icmp_of[] = {
321 0x10000000, 0x00000000, 0x00000000, 0x00000000,
322 0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 0x00000000, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 /* Packet types for packets with an Innermost/Last ICMP header */
332 static const u32 ice_ptypes_icmp_il[] = {
333 0x00000000, 0x02040408, 0x40000102, 0x08101020,
334 0x00000408, 0x00000000, 0x00000000, 0x00000000,
335 0x00000000, 0x00000000, 0x42108000, 0x00000000,
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 /* Packet types for packets with an Outermost/First GRE header */
344 static const u32 ice_ptypes_gre_of[] = {
345 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
346 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
347 0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000000, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 /* Packet types for packets with an Innermost/Last MAC header */
356 static const u32 ice_ptypes_mac_il[] = {
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000000, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 /* Packet types for GTPC */
368 static const u32 ice_ptypes_gtpc[] = {
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000180, 0x00000000,
372 0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 0x00000000, 0x00000000, 0x00000000, 0x00000000,
375 0x00000000, 0x00000000, 0x00000000, 0x00000000,
376 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 /* Packet types for GTPC with TEID */
380 static const u32 ice_ptypes_gtpc_tid[] = {
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000060, 0x00000000,
384 0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 0x00000000, 0x00000000, 0x00000000, 0x00000000,
386 0x00000000, 0x00000000, 0x00000000, 0x00000000,
387 0x00000000, 0x00000000, 0x00000000, 0x00000000,
388 0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 /* Packet types for GTPU */
392 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
393 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
394 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
395 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
396 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
397 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
398 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
399 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
400 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
401 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
402 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
403 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
404 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
405 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
406 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
407 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
408 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
409 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
410 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
411 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
412 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
415 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
416 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
417 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
418 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
419 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
420 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
421 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
422 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
423 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
424 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
425 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
426 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
427 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
428 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
429 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
430 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
431 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
432 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
433 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
434 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
435 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
438 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
439 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
440 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
441 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
442 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
443 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
444 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
445 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
446 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
447 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
448 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
449 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
450 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
451 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
452 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
453 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
454 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
455 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
456 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
457 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
458 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
461 static const u32 ice_ptypes_gtpu[] = {
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 /* Packet types for pppoe */
473 static const u32 ice_ptypes_pppoe[] = {
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 /* Packet types for packets with PFCP NODE header */
485 static const u32 ice_ptypes_pfcp_node[] = {
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x80000000, 0x00000002,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 /* Packet types for packets with PFCP SESSION header */
497 static const u32 ice_ptypes_pfcp_session[] = {
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000000, 0x00000005,
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000000, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 /* Packet types for l2tpv3 */
509 static const u32 ice_ptypes_l2tpv3[] = {
510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 0x00000000, 0x00000000, 0x00000000, 0x00000000,
512 0x00000000, 0x00000000, 0x00000000, 0x00000300,
513 0x00000000, 0x00000000, 0x00000000, 0x00000000,
514 0x00000000, 0x00000000, 0x00000000, 0x00000000,
515 0x00000000, 0x00000000, 0x00000000, 0x00000000,
516 0x00000000, 0x00000000, 0x00000000, 0x00000000,
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
520 /* Packet types for esp */
521 static const u32 ice_ptypes_esp[] = {
522 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 0x00000000, 0x00000003, 0x00000000, 0x00000000,
524 0x00000000, 0x00000000, 0x00000000, 0x00000000,
525 0x00000000, 0x00000000, 0x00000000, 0x00000000,
526 0x00000000, 0x00000000, 0x00000000, 0x00000000,
527 0x00000000, 0x00000000, 0x00000000, 0x00000000,
528 0x00000000, 0x00000000, 0x00000000, 0x00000000,
529 0x00000000, 0x00000000, 0x00000000, 0x00000000,
532 /* Packet types for ah */
533 static const u32 ice_ptypes_ah[] = {
534 0x00000000, 0x00000000, 0x00000000, 0x00000000,
535 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
536 0x00000000, 0x00000000, 0x00000000, 0x00000000,
537 0x00000000, 0x00000000, 0x00000000, 0x00000000,
538 0x00000000, 0x00000000, 0x00000000, 0x00000000,
539 0x00000000, 0x00000000, 0x00000000, 0x00000000,
540 0x00000000, 0x00000000, 0x00000000, 0x00000000,
541 0x00000000, 0x00000000, 0x00000000, 0x00000000,
544 /* Packet types for packets with NAT_T ESP header */
545 static const u32 ice_ptypes_nat_t_esp[] = {
546 0x00000000, 0x00000000, 0x00000000, 0x00000000,
547 0x00000000, 0x00000030, 0x00000000, 0x00000000,
548 0x00000000, 0x00000000, 0x00000000, 0x00000000,
549 0x00000000, 0x00000000, 0x00000000, 0x00000000,
550 0x00000000, 0x00000000, 0x00000000, 0x00000000,
551 0x00000000, 0x00000000, 0x00000000, 0x00000000,
552 0x00000000, 0x00000000, 0x00000000, 0x00000000,
553 0x00000000, 0x00000000, 0x00000000, 0x00000000,
556 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
557 0x00000846, 0x00000000, 0x00000000, 0x00000000,
558 0x00000000, 0x00000000, 0x00000000, 0x00000000,
559 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
560 0x00000000, 0x00000000, 0x00000000, 0x00000000,
561 0x00000000, 0x00000000, 0x00000000, 0x00000000,
562 0x00000000, 0x00000000, 0x00000000, 0x00000000,
563 0x00000000, 0x00000000, 0x00000000, 0x00000000,
564 0x00000000, 0x00000000, 0x00000000, 0x00000000,
567 /* Manage parameters and info. used during the creation of a flow profile */
568 struct ice_flow_prof_params {
570 u16 entry_length; /* # of bytes formatted entry will require */
572 struct ice_flow_prof *prof;
/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
 * This will give us the direction flags.
 */
577 struct ice_fv_word es[ICE_MAX_FV_WORDS];
578 /* attributes can be used to add attributes to a particular PTYPE */
579 const struct ice_ptype_attributes *attr;
582 u16 mask[ICE_MAX_FV_WORDS];
583 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Tunnel/extension segment-header flags grouped together for RSS handling.
 * NOTE(review): the name suggests these headers select the inner packet
 * segment for RSS - confirm against the users of this mask.
 */
586 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
587 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
588 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
589 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
590 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
591 ICE_FLOW_SEG_HDR_NAT_T_ESP)
/* All L2 segment-header flags */
593 #define ICE_FLOW_SEG_HDRS_L2_MASK \
594 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 segment-header flags; ice_flow_val_hdrs() rejects segments that set
 * more than one of these.
 */
595 #define ICE_FLOW_SEG_HDRS_L3_MASK \
596 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
597 ICE_FLOW_SEG_HDR_ARP)
/* All L4 segment-header flags; ice_flow_val_hdrs() rejects segments that set
 * more than one of these.
 */
598 #define ICE_FLOW_SEG_HDRS_L4_MASK \
599 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
600 ICE_FLOW_SEG_HDR_SCTP)
/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
607 static enum ice_status
608 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
612 for (i = 0; i < segs_cnt; i++) {
613 /* Multiple L3 headers */
614 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
615 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
616 return ICE_ERR_PARAM;
618 /* Multiple L4 headers */
619 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
620 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
621 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes.
 * Used by ice_flow_calc_seg_sz() to estimate the byte length of a packet
 * segment from the segment-header flags it carries.
 */
628 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
/* A single 802.1Q tag adds 2 bytes beyond the base Ethernet header here */
629 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
630 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
631 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
632 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
633 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
634 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
635 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
636 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 */
643 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
648 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
649 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
652 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
653 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
654 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
655 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
656 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
657 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
658 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
659 /* A L3 header is required if L4 is specified */
663 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
664 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
665 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
666 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
667 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
668 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
669 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
670 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 */
682 static enum ice_status
683 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
685 struct ice_flow_prof *prof;
688 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
693 for (i = 0; i < params->prof->segs_cnt; i++) {
694 const ice_bitmap_t *src;
697 hdrs = prof->segs[i].hdrs;
699 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
700 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
701 (const ice_bitmap_t *)ice_ptypes_mac_il;
702 ice_and_bitmap(params->ptypes, params->ptypes, src,
706 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
707 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
708 ice_and_bitmap(params->ptypes, params->ptypes, src,
712 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
713 ice_and_bitmap(params->ptypes, params->ptypes,
714 (const ice_bitmap_t *)ice_ptypes_arp_of,
718 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
719 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
720 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
721 ice_and_bitmap(params->ptypes, params->ptypes, src,
723 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
724 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
725 ice_and_bitmap(params->ptypes,
728 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
729 ice_and_bitmap(params->ptypes, params->ptypes,
730 (const ice_bitmap_t *)
733 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
734 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
735 ice_and_bitmap(params->ptypes, params->ptypes,
736 src, ICE_FLOW_PTYPE_MAX);
738 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
739 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
740 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
741 ice_and_bitmap(params->ptypes, params->ptypes, src,
743 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
744 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
745 ice_and_bitmap(params->ptypes,
748 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
749 ice_and_bitmap(params->ptypes, params->ptypes,
750 (const ice_bitmap_t *)
753 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
754 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
755 ice_and_bitmap(params->ptypes, params->ptypes,
756 src, ICE_FLOW_PTYPE_MAX);
760 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
761 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
762 ice_and_bitmap(params->ptypes, params->ptypes,
763 src, ICE_FLOW_PTYPE_MAX);
764 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
765 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
766 ice_and_bitmap(params->ptypes, params->ptypes, src,
770 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
771 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
772 (const ice_bitmap_t *)ice_ptypes_icmp_il;
773 ice_and_bitmap(params->ptypes, params->ptypes, src,
775 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
777 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
778 ice_and_bitmap(params->ptypes, params->ptypes,
779 src, ICE_FLOW_PTYPE_MAX);
781 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
782 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
783 ice_and_bitmap(params->ptypes, params->ptypes,
784 src, ICE_FLOW_PTYPE_MAX);
785 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
786 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
787 ice_and_bitmap(params->ptypes, params->ptypes,
788 src, ICE_FLOW_PTYPE_MAX);
789 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
790 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
791 ice_and_bitmap(params->ptypes, params->ptypes,
792 src, ICE_FLOW_PTYPE_MAX);
794 /* Attributes for GTP packet with downlink */
795 params->attr = ice_attr_gtpu_down;
796 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
797 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
798 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
799 ice_and_bitmap(params->ptypes, params->ptypes,
800 src, ICE_FLOW_PTYPE_MAX);
802 /* Attributes for GTP packet with uplink */
803 params->attr = ice_attr_gtpu_up;
804 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
805 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
806 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
807 ice_and_bitmap(params->ptypes, params->ptypes,
808 src, ICE_FLOW_PTYPE_MAX);
810 /* Attributes for GTP packet with Extension Header */
811 params->attr = ice_attr_gtpu_eh;
812 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
813 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
814 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
815 ice_and_bitmap(params->ptypes, params->ptypes,
816 src, ICE_FLOW_PTYPE_MAX);
817 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
818 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
819 ice_and_bitmap(params->ptypes, params->ptypes,
820 src, ICE_FLOW_PTYPE_MAX);
821 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
822 src = (const ice_bitmap_t *)ice_ptypes_esp;
823 ice_and_bitmap(params->ptypes, params->ptypes,
824 src, ICE_FLOW_PTYPE_MAX);
825 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
826 src = (const ice_bitmap_t *)ice_ptypes_ah;
827 ice_and_bitmap(params->ptypes, params->ptypes,
828 src, ICE_FLOW_PTYPE_MAX);
829 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
830 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
831 ice_and_bitmap(params->ptypes, params->ptypes,
832 src, ICE_FLOW_PTYPE_MAX);
835 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
836 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
838 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
841 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
843 ice_and_bitmap(params->ptypes, params->ptypes,
844 src, ICE_FLOW_PTYPE_MAX);
846 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
847 ice_andnot_bitmap(params->ptypes, params->ptypes,
848 src, ICE_FLOW_PTYPE_MAX);
850 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
851 ice_andnot_bitmap(params->ptypes, params->ptypes,
852 src, ICE_FLOW_PTYPE_MAX);
860 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
861 * @hw: pointer to the HW struct
862 * @params: information about the flow to be processed
863 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
865 * This function will allocate an extraction sequence entries for a DWORD size
866 * chunk of the packet flags.
868 static enum ice_status
869 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
870 struct ice_flow_prof_params *params,
871 enum ice_flex_mdid_pkt_flags flags)
/* NOTE(review): several source lines are elided from this view (opening
 * brace, declaration of 'idx', the es_cnt increment and return). Comments
 * below describe only what is visible here.
 */
873 u8 fv_words = hw->blk[params->blk].es.fvw;
876 /* Make sure the number of extraction sequence entries required does not
877 * exceed the block's capacity.
879 if (params->es_cnt >= fv_words)
880 return ICE_ERR_MAX_LIMIT;
882 /* some blocks require a reversed field vector layout */
883 if (hw->blk[params->blk].es.reverse)
884 idx = fv_words - params->es_cnt - 1;
/* presumably the 'else' branch of the reverse check — elided line; verify */
886 idx = params->es_cnt;
/* Metadata (packet flags) is extracted via the META protocol ID, with the
 * flag selector stored as the FV offset.
 */
888 params->es[idx].prot_id = ICE_PROT_META_ID;
889 params->es[idx].off = flags;
896 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
897 * @hw: pointer to the HW struct
898 * @params: information about the flow to be processed
899 * @seg: packet segment index of the field to be extracted
900 * @fld: ID of field to be extracted
901 * @match: bitfield of all fields
903 * This function determines the protocol ID, offset, and size of the given
904 * field. It then allocates one or more extraction sequence entries for the
905 * given field, and fill the entries with protocol ID and offset information.
907 static enum ice_status
908 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
909 u8 seg, enum ice_flow_field fld, u64 match)
/* NOTE(review): this view elides lines, including the 'switch (fld)'
 * statement itself, the per-case 'break's, and the declarations of
 * 'sib_mask', 'off', 'mask' and 'idx'. Only visible behavior is commented.
 */
911 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
912 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
913 u8 fv_words = hw->blk[params->blk].es.fvw;
914 struct ice_flow_fld_info *flds;
915 u16 cnt, ese_bits, i;
920 flds = params->prof->segs[seg].fields;
/* Map the flow field to a protocol ID; outer (seg == 0) vs inner headers
 * select the "OF/OS" vs "IL" protocol variants.
 */
923 case ICE_FLOW_FIELD_IDX_ETH_DA:
924 case ICE_FLOW_FIELD_IDX_ETH_SA:
925 case ICE_FLOW_FIELD_IDX_S_VLAN:
926 case ICE_FLOW_FIELD_IDX_C_VLAN:
927 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
929 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
930 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
932 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
933 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
935 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
936 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
938 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
939 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
940 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
942 /* TTL and PROT share the same extraction seq. entry.
943 * Each is considered a sibling to the other in terms of sharing
944 * the same extraction sequence entry.
946 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
947 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
948 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
949 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
951 /* If the sibling field is also included, that field's
952 * mask needs to be included.
954 if (match & BIT(sib))
955 sib_mask = ice_flds_info[sib].mask;
957 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
958 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
959 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
961 /* TTL and PROT share the same extraction seq. entry.
962 * Each is considered a sibling to the other in terms of sharing
963 * the same extraction sequence entry.
965 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
966 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
967 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
968 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
970 /* If the sibling field is also included, that field's
971 * mask needs to be included.
973 if (match & BIT(sib))
974 sib_mask = ice_flds_info[sib].mask;
976 case ICE_FLOW_FIELD_IDX_IPV4_SA:
977 case ICE_FLOW_FIELD_IDX_IPV4_DA:
978 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
980 case ICE_FLOW_FIELD_IDX_IPV6_SA:
981 case ICE_FLOW_FIELD_IDX_IPV6_DA:
982 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
983 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
984 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
985 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
986 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
987 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
988 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
990 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
991 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
992 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
993 prot_id = ICE_PROT_TCP_IL;
995 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
996 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
997 prot_id = ICE_PROT_UDP_IL_OR_S;
999 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1000 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1001 prot_id = ICE_PROT_SCTP_IL;
1003 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1004 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1005 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1006 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1007 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1008 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1009 /* GTP is accessed through UDP OF protocol */
1010 prot_id = ICE_PROT_UDP_OF;
1012 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1013 prot_id = ICE_PROT_PPPOE;
1015 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1016 prot_id = ICE_PROT_UDP_IL_OR_S;
1018 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1019 prot_id = ICE_PROT_L2TPV3;
1021 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1022 prot_id = ICE_PROT_ESP_F;
1024 case ICE_FLOW_FIELD_IDX_AH_SPI:
1025 prot_id = ICE_PROT_ESP_2;
1027 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1028 prot_id = ICE_PROT_UDP_IL_OR_S;
1030 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1031 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1032 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1033 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1034 case ICE_FLOW_FIELD_IDX_ARP_OP:
1035 prot_id = ICE_PROT_ARP_OF;
1037 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1038 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1039 /* ICMP type and code share the same extraction seq. entry */
1040 prot_id = (params->prof->segs[seg].hdrs &
1041 ICE_FLOW_SEG_HDR_IPV4) ?
1042 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1043 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1044 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1045 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1047 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1048 prot_id = ICE_PROT_GRE_OF;
/* default case: field not implemented */
1051 return ICE_ERR_NOT_IMPL;
1054 /* Each extraction sequence entry is a word in size, and extracts a
1055 * word-aligned offset from a protocol header.
1057 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record where/how this field is extracted: word-aligned offset plus a
 * bit displacement within the extracted word.
 */
1059 flds[fld].xtrct.prot_id = prot_id;
1060 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1061 ICE_FLOW_FV_EXTRACT_SZ;
1062 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1063 flds[fld].xtrct.idx = params->es_cnt;
1064 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1066 /* Adjust the next field-entry index after accommodating the number of
1067 * entries this field consumes
1069 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1070 ice_flds_info[fld].size, ese_bits);
1072 /* Fill in the extraction sequence entries needed for this field */
1073 off = flds[fld].xtrct.off;
1074 mask = flds[fld].xtrct.mask;
1075 for (i = 0; i < cnt; i++) {
1076 /* Only consume an extraction sequence entry if there is no
1077 * sibling field associated with this field or the sibling entry
1078 * already extracts the word shared with this field.
1080 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1081 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1082 flds[sib].xtrct.off != off) {
1085 /* Make sure the number of extraction sequence required
1086 * does not exceed the block's capability
1088 if (params->es_cnt >= fv_words)
1089 return ICE_ERR_MAX_LIMIT;
1091 /* some blocks require a reversed field vector layout */
1092 if (hw->blk[params->blk].es.reverse)
1093 idx = fv_words - params->es_cnt - 1;
/* presumably the 'else' arm of the reverse check — elided line; verify */
1095 idx = params->es_cnt;
1097 params->es[idx].prot_id = prot_id;
1098 params->es[idx].off = off;
1099 params->mask[idx] = mask | sib_mask;
1103 off += ICE_FLOW_FV_EXTRACT_SZ;
1110 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1111 * @hw: pointer to the HW struct
1112 * @params: information about the flow to be processed
1113 * @seg: index of packet segment whose raw fields are to be be extracted
1115 static enum ice_status
1116 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* NOTE(review): this view elides lines (remaining parameters, local
 * declarations, early 'return ICE_SUCCESS', loop-closing braces).
 * Fix applied below: "&params" was mojibake-corrupted to "¶ms"
 * (HTML entity "&para;" rendered as U+00B6); restored to valid C.
 */
1123 if (!params->prof->segs[seg].raws_cnt)
1126 if (params->prof->segs[seg].raws_cnt >
1127 ARRAY_SIZE(params->prof->segs[seg].raws))
1128 return ICE_ERR_MAX_LIMIT;
1130 /* Offsets within the segment headers are not supported */
1131 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1133 return ICE_ERR_PARAM;
1135 fv_words = hw->blk[params->blk].es.fvw;
1137 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1138 struct ice_flow_seg_fld_raw *raw;
1141 raw = &params->prof->segs[seg].raws[i];
1143 /* Storing extraction information */
1144 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1145 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1146 ICE_FLOW_FV_EXTRACT_SZ;
1147 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1149 raw->info.xtrct.idx = params->es_cnt;
1151 /* Determine the number of field vector entries this raw field
1154 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1155 (raw->info.src.last * BITS_PER_BYTE),
1156 (ICE_FLOW_FV_EXTRACT_SZ *
1158 off = raw->info.xtrct.off;
1159 for (j = 0; j < cnt; j++) {
1162 /* Make sure the number of extraction sequence required
1163 * does not exceed the block's capability
1165 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1166 params->es_cnt >= ICE_MAX_FV_WORDS)
1167 return ICE_ERR_MAX_LIMIT;
1169 /* some blocks require a reversed field vector layout */
1170 if (hw->blk[params->blk].es.reverse)
1171 idx = fv_words - params->es_cnt - 1;
/* presumably the 'else' arm of the reverse check — elided line; verify */
1173 idx = params->es_cnt;
1175 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1176 params->es[idx].off = off;
1178 off += ICE_FLOW_FV_EXTRACT_SZ;
1186 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1187 * @hw: pointer to the HW struct
1188 * @params: information about the flow to be processed
1190 * This function iterates through all matched fields in the given segments, and
1191 * creates an extraction sequence for the fields.
1193 static enum ice_status
1194 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1195 struct ice_flow_prof_params *params)
/* NOTE(review): opening brace, loop index declaration, the 'match &= ~bit'
 * bookkeeping and closing braces are elided from this view.
 */
1197 enum ice_status status = ICE_SUCCESS;
1200 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1203 if (params->blk == ICE_BLK_ACL) {
1204 status = ice_flow_xtract_pkt_flags(hw, params,
1205 ICE_RX_MDID_PKT_FLAGS_15_0);
/* Walk every segment and extract each matched field; the 'match' bitmap
 * lets the inner loop terminate once all requested fields are handled.
 */
1210 for (i = 0; i < params->prof->segs_cnt; i++) {
1211 u64 match = params->prof->segs[i].match;
1212 enum ice_flow_field j;
1214 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1215 const u64 bit = BIT_ULL(j);
1218 status = ice_flow_xtract_fld(hw, params, i, j,
1226 /* Process raw matching bytes */
1227 status = ice_flow_xtract_raws(hw, params, i);
1236 * ice_flow_sel_acl_scen - returns the specific scenario
1237 * @hw: pointer to the hardware structure
1238 * @params: information about the flow to be processed
1240 * This function will return the specific scenario based on the
1241 * params passed to it
1243 static enum ice_status
1244 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
/* NOTE(review): elided lines include the null check guarding hw->acl_tbl
 * (presumably '!hw->acl_tbl'), the 'cand_scen = scen;' assignment, the
 * '!cand_scen' check, and the final return — verify in full source.
 */
1246 /* Find the best-fit scenario for the provided match width */
1247 struct ice_acl_scen *cand_scen = NULL, *scen;
1250 return ICE_ERR_DOES_NOT_EXIST;
1252 /* Loop through each scenario and match against the scenario width
1253 * to select the specific scenario
1255 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
/* best fit = narrowest scenario that still fits entry_length */
1256 if (scen->eff_width >= params->entry_length &&
1257 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1260 return ICE_ERR_DOES_NOT_EXIST;
1262 params->prof->cfg.scen = cand_scen;
1268 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1269 * @params: information about the flow to be processed
1271 static enum ice_status
1272 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
/* Lays out ACL entry bytes: fields and raws get consecutive byte-selection
 * indices starting at ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX; range-checked
 * fields get range-checker slots instead.
 * NOTE(review): this view elides lines (inner loop variable declaration,
 * 'match &= ~bit', closing braces). Fix applied: "&params" was
 * mojibake-corrupted to "¶ms" (HTML entity "&para;"); restored.
 */
1274 u16 index, i, range_idx = 0;
1276 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1278 for (i = 0; i < params->prof->segs_cnt; i++) {
1279 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1280 u64 match = seg->match;
1283 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1284 struct ice_flow_fld_info *fld;
1285 const u64 bit = BIT_ULL(j);
1290 fld = &seg->fields[j];
1291 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1293 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1294 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1296 /* Range checking only supported for single
1299 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1301 BITS_PER_BYTE * 2) > 1)
1302 return ICE_ERR_PARAM;
1304 /* Ranges must define low and high values */
1305 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1306 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1307 return ICE_ERR_PARAM;
1309 fld->entry.val = range_idx++;
1311 /* Store adjusted byte-length of field for later
1312 * use, taking into account potential
1313 * non-byte-aligned displacement
1315 fld->entry.last = DIVIDE_AND_ROUND_UP
1316 (ice_flds_info[j].size +
1317 (fld->xtrct.disp % BITS_PER_BYTE),
1319 fld->entry.val = index;
1320 index += fld->entry.last;
1326 for (j = 0; j < seg->raws_cnt; j++) {
1327 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1329 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1330 raw->info.entry.val = index;
1331 raw->info.entry.last = raw->info.src.last;
1332 index += raw->info.entry.last;
1336 /* Currently only support using the byte selection base, which only
1337 * allows for an effective entry size of 30 bytes. Reject anything
1340 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1341 return ICE_ERR_PARAM;
1343 /* Only 8 range checkers per profile, reject anything trying to use
1346 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1347 return ICE_ERR_PARAM;
1349 /* Store # bytes required for entry for later use */
1350 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1356 * ice_flow_proc_segs - process all packet segments associated with a profile
1357 * @hw: pointer to the HW struct
1358 * @params: information about the flow to be processed
1360 static enum ice_status
1361 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
/* NOTE(review): 'case' labels of the switch (per-block handling) and the
 * early-exit error checks between the calls are elided from this view.
 */
1363 enum ice_status status;
1365 status = ice_flow_proc_seg_hdrs(params);
1369 status = ice_flow_create_xtrct_seq(hw, params);
/* Per-classification-block post-processing; ACL additionally needs an
 * entry format and a scenario selection.
 */
1373 switch (params->blk) {
1376 status = ICE_SUCCESS;
1379 status = ice_flow_acl_def_entry_frmt(params);
1382 status = ice_flow_sel_acl_scen(hw, params);
/* default: unsupported block type */
1387 return ICE_ERR_NOT_IMPL;
1393 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1394 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1395 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1398 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1399 * @hw: pointer to the HW struct
1400 * @blk: classification stage
1401 * @dir: flow direction
1402 * @segs: array of one or more packet segments that describe the flow
1403 * @segs_cnt: number of packet segments provided
1404 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1405 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1407 static struct ice_flow_prof *
1408 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1409 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1410 u8 segs_cnt, u16 vsi_handle, u32 conds)
/* NOTE(review): elided lines include the declaration of 'i', 'continue'
 * statements, the 'prof = p; break;' on match, and the final return of
 * 'prof'. Returns the matching profile under the block's profile lock.
 */
1412 struct ice_flow_prof *p, *prof = NULL;
1414 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1415 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1416 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1417 segs_cnt && segs_cnt == p->segs_cnt) {
1420 /* Check for profile-VSI association if specified */
1421 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1422 ice_is_vsi_valid(hw, vsi_handle) &&
1423 !ice_is_bit_set(p->vsis, vsi_handle))
1426 /* Protocol headers must be checked. Matched fields are
1427 * checked if specified.
1429 for (i = 0; i < segs_cnt; i++)
1430 if (segs[i].hdrs != p->segs[i].hdrs ||
1431 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1432 segs[i].match != p->segs[i].match))
1435 /* A match is found if all segments are matched */
1436 if (i == segs_cnt) {
1441 ice_release_lock(&hw->fl_profs_locks[blk]);
1447 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1448 * @hw: pointer to the HW struct
1449 * @blk: classification stage
1450 * @dir: flow direction
1451 * @segs: array of one or more packet segments that describe the flow
1452 * @segs_cnt: number of packet segments provided
1455 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1456 struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Thin wrapper: search across all VSIs (ICE_MAX_VSI) with matched-field
 * checking enabled; return the profile ID or ICE_FLOW_PROF_ID_INVAL.
 */
1458 struct ice_flow_prof *p;
1460 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1461 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1463 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1467 * ice_flow_find_prof_id - Look up a profile with given profile ID
1468 * @hw: pointer to the HW struct
1469 * @blk: classification stage
1470 * @prof_id: unique ID to identify this flow profile
1472 static struct ice_flow_prof *
1473 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
/* Linear scan of the block's profile list by ID.
 * NOTE(review): the 'return p;' / 'return NULL;' lines are elided from
 * this view. Caller must hold the profile-list lock (not taken here).
 */
1475 struct ice_flow_prof *p;
1477 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1478 if (p->id == prof_id)
1485 * ice_dealloc_flow_entry - Deallocate flow entry memory
1486 * @hw: pointer to the HW struct
1487 * @entry: flow entry to be removed
1490 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
/* Frees every buffer owned by a flow entry, then the entry itself.
 * NOTE(review): guard conditions (e.g. null checks around entry->entry
 * and entry->acts) are elided from this view — verify in full source.
 */
1496 ice_free(hw, entry->entry);
1498 if (entry->range_buf) {
1499 ice_free(hw, entry->range_buf);
1500 entry->range_buf = NULL;
1504 ice_free(hw, entry->acts);
1506 entry->acts_cnt = 0;
1509 ice_free(hw, entry);
1512 #define ICE_ACL_INVALID_SCEN 0x3f
1515 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1516 * @hw: pointer to the hardware structure
1517 * @prof: pointer to flow profile
1518 * @buf: destination buffer function writes partial extraction sequence to
1520 * returns ICE_SUCCESS if no PF is associated to the given profile
1521 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1522 * returns other error code for real error
1524 static enum ice_status
1525 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1526 struct ice_aqc_acl_prof_generic_frmt *buf)
/* NOTE(review): declaration of 'prof_id', error-return checks after the
 * two calls, and the ICE_SUCCESS returns after the all-zero / all-invalid
 * checks are elided from this view.
 */
1528 enum ice_status status;
1531 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1535 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1539 /* If all PF's associated scenarios are all 0 or all
1540 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1541 * not been configured yet.
1543 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1544 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1545 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1546 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1549 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1550 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1551 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1552 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1553 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1554 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1555 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1556 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* Any other scenario pattern means at least one PF still uses it */
1559 return ICE_ERR_IN_USE;
1563 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1564 * @hw: pointer to the hardware structure
1565 * @acts: array of actions to be performed on a match
1566 * @acts_cnt: number of actions
1568 static enum ice_status
1569 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
/* NOTE(review): the 'acts_cnt' parameter line, loop-index declaration,
 * the cntrs.first/.last field names on the LE16_TO_CPU assignments, the
 * status check after deallocation, and the final return are elided from
 * this view.
 */
1574 for (i = 0; i < acts_cnt; i++) {
1575 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1576 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1577 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1578 struct ice_acl_cntrs cntrs;
1579 enum ice_status status;
1581 cntrs.bank = 0; /* Only bank0 for the moment */
1583 LE16_TO_CPU(acts[i].data.acl_act.value);
1585 LE16_TO_CPU(acts[i].data.acl_act.value);
1587 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1588 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
/* presumably the 'else' arm selecting a single counter — elided line */
1590 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1592 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1601 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1602 * @hw: pointer to the hardware structure
1603 * @prof: pointer to flow profile
1605 * Disassociate the scenario from the profile for the PF of the VSI.
1607 static enum ice_status
1608 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
/* NOTE(review): declaration of 'prof_id', the error checks after the two
 * lookup calls, and the final 'return status;' are elided from this view.
 */
1610 struct ice_aqc_acl_prof_generic_frmt buf;
1611 enum ice_status status = ICE_SUCCESS;
1614 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1616 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1620 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1624 /* Clear scenario for this PF */
1625 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1626 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1632 * ice_flow_rem_entry_sync - Remove a flow entry
1633 * @hw: pointer to the HW struct
1634 * @blk: classification stage
1635 * @entry: flow entry to be removed
1637 static enum ice_status
1638 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1639 struct ice_flow_entry *entry)
/* NOTE(review): the guard conditions producing the two ICE_ERR_BAD_PTR
 * returns (presumably '!entry' and a missing profile/scenario check), the
 * status check after ice_acl_rem_entry, and the final return are elided
 * from this view. Caller is expected to hold the entry-list lock.
 */
1642 return ICE_ERR_BAD_PTR;
1644 if (blk == ICE_BLK_ACL) {
1645 enum ice_status status;
1648 return ICE_ERR_BAD_PTR;
1650 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1651 entry->scen_entry_idx);
1655 /* Checks if we need to release an ACL counter. */
1656 if (entry->acts_cnt && entry->acts)
1657 ice_flow_acl_free_act_cntr(hw, entry->acts,
1661 LIST_DEL(&entry->l_entry);
1663 ice_dealloc_flow_entry(hw, entry);
1669 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1670 * @hw: pointer to the HW struct
1671 * @blk: classification stage
1672 * @dir: flow direction
1673 * @prof_id: unique ID to identify this flow profile
1674 * @segs: array of one or more packet segments that describe the flow
1675 * @segs_cnt: number of packet segments provided
1676 * @acts: array of default actions
1677 * @acts_cnt: number of default actions
1678 * @prof: stores the returned flow profile added
1680 * Assumption: the caller has acquired the lock to the profile list
1682 static enum ice_status
1683 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1684 enum ice_flow_dir dir, u64 prof_id,
1685 struct ice_flow_seg_info *segs, u8 segs_cnt,
1686 struct ice_flow_action *acts, u8 acts_cnt,
1687 struct ice_flow_prof **prof)
/* Allocates and fills a software flow profile, builds its extraction
 * sequence, and programs a matching HW profile. On any failure the
 * partially-built profile is freed (error path visible at the bottom).
 * NOTE(review): this view elides lines (allocation null check, the
 * 'if (acts_cnt) {' guard, error 'goto out' jumps, loop index declaration,
 * and the success return). Fix applied: "&params" was mojibake-corrupted
 * to "¶ms" (HTML entity "&para;" rendered as U+00B6) in five places;
 * restored to valid C.
 */
1689 struct ice_flow_prof_params params;
1690 enum ice_status status;
1693 if (!prof || (acts_cnt && !acts))
1694 return ICE_ERR_BAD_PTR;
1696 ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1697 params.prof = (struct ice_flow_prof *)
1698 ice_malloc(hw, sizeof(*params.prof));
1700 return ICE_ERR_NO_MEMORY;
1702 /* initialize extraction sequence to all invalid (0xff) */
1703 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1704 params.es[i].prot_id = ICE_PROT_INVALID;
1705 params.es[i].off = ICE_FV_OFFSET_INVAL;
1709 params.prof->id = prof_id;
1710 params.prof->dir = dir;
1711 params.prof->segs_cnt = segs_cnt;
1713 /* Make a copy of the segments that need to be persistent in the flow
1716 for (i = 0; i < segs_cnt; i++)
1717 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1718 ICE_NONDMA_TO_NONDMA);
1720 /* Make a copy of the actions that need to be persistent in the flow
1724 params.prof->acts = (struct ice_flow_action *)
1725 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1726 ICE_NONDMA_TO_NONDMA);
1728 if (!params.prof->acts) {
1729 status = ICE_ERR_NO_MEMORY;
1734 status = ice_flow_proc_segs(hw, &params);
1736 ice_debug(hw, ICE_DBG_FLOW,
1737 "Error processing a flow's packet segments\n");
1741 /* Add a HW profile for this flow profile */
1742 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1743 params.attr, params.attr_cnt, params.es,
1746 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1750 INIT_LIST_HEAD(&params.prof->entries);
1751 ice_init_lock(&params.prof->entries_lock);
1752 *prof = params.prof;
/* error path: release whatever was allocated before the failure */
1756 if (params.prof->acts)
1757 ice_free(hw, params.prof->acts);
1758 ice_free(hw, params.prof);
1765 * ice_flow_rem_prof_sync - remove a flow profile
1766 * @hw: pointer to the hardware structure
1767 * @blk: classification stage
1768 * @prof: pointer to flow profile to remove
1770 * Assumption: the caller has acquired the lock to the profile list
1772 static enum ice_status
1773 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1774 struct ice_flow_prof *prof)
/* NOTE(review): elided lines include 'prof_id' declaration, error breaks
 * inside the entry loop, status checks after several calls, and the final
 * 'return status;'. Caller holds the profile-list lock.
 */
1776 enum ice_status status;
1778 /* Remove all remaining flow entries before removing the flow profile */
1779 if (!LIST_EMPTY(&prof->entries)) {
1780 struct ice_flow_entry *e, *t;
1782 ice_acquire_lock(&prof->entries_lock);
1784 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1786 status = ice_flow_rem_entry_sync(hw, blk, e);
1791 ice_release_lock(&prof->entries_lock);
1794 if (blk == ICE_BLK_ACL) {
1795 struct ice_aqc_acl_profile_ranges query_rng_buf;
1796 struct ice_aqc_acl_prof_generic_frmt buf;
1799 /* Disassociate the scenario from the profile for the PF */
1800 status = ice_flow_acl_disassoc_scen(hw, prof);
1804 /* Clear the range-checker if the profile ID is no longer
1807 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1808 if (status && status != ICE_ERR_IN_USE) {
1810 } else if (!status) {
1811 /* Clear the range-checker value for profile ID */
1812 ice_memset(&query_rng_buf, 0,
1813 sizeof(struct ice_aqc_acl_profile_ranges),
1816 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1821 status = ice_prog_acl_prof_ranges(hw, prof_id,
1822 &query_rng_buf, NULL);
1828 /* Remove all hardware profiles associated with this flow profile */
1829 status = ice_rem_prof(hw, blk, prof->id);
1831 LIST_DEL(&prof->l_entry);
1832 ice_destroy_lock(&prof->entries_lock);
1834 ice_free(hw, prof->acts);
1842 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1843 * @buf: Destination buffer function writes partial xtrct sequence to
1844 * @info: Info about field
1847 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1848 struct ice_flow_fld_info *info)
/* Writes the byte-selection indices for one field into the ACL profile
 * format buffer. NOTE(review): declarations of 'src', 'dst' and 'i' are
 * elided from this view.
 */
1853 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1854 info->xtrct.disp / BITS_PER_BYTE;
1855 dst = info->entry.val;
1856 for (i = 0; i < info->entry.last; i++)
1857 /* HW stores field vector words in LE, convert words back to BE
1858 * so constructed entries will end up in network order
/* the '^ 1' swaps the two bytes within each 16-bit FV word */
1860 buf->byte_selection[dst++] = src++ ^ 1;
1864 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1865 * @hw: pointer to the hardware structure
1866 * @prof: pointer to flow profile
1868 static enum ice_status
1869 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
/* NOTE(review): elided lines include 'prof_id'/loop-index declarations,
 * the early-success path when the profile is already in use
 * (ICE_ERR_IN_USE), 'match &= ~bit' bookkeeping, the word_selection
 * right-hand side continuation, and the final return.
 */
1871 struct ice_aqc_acl_prof_generic_frmt buf;
1872 struct ice_flow_fld_info *info;
1873 enum ice_status status;
1877 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1879 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1883 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1884 if (status && status != ICE_ERR_IN_USE)
1888 /* Program the profile dependent configuration. This is done
1889 * only once regardless of the number of PFs using that profile
1891 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1893 for (i = 0; i < prof->segs_cnt; i++) {
1894 struct ice_flow_seg_info *seg = &prof->segs[i];
1895 u64 match = seg->match;
1898 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1899 const u64 bit = BIT_ULL(j);
1904 info = &seg->fields[j];
/* range-checked fields use the word selector; others use bytes */
1906 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1907 buf.word_selection[info->entry.val] =
1910 ice_flow_acl_set_xtrct_seq_fld(&buf,
1916 for (j = 0; j < seg->raws_cnt; j++) {
1917 info = &seg->raws[j].info;
1918 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Default all PF scenario slots to "invalid", then set ours below */
1922 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1923 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1927 /* Update the current PF */
1928 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1929 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1935 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1936 * @hw: pointer to the hardware structure
1937 * @blk: classification stage
1938 * @vsi_handle: software VSI handle
1939 * @vsig: target VSI group
1941 * Assumption: the caller has already verified that the VSI to
1942 * be added has the same characteristics as the VSIG and will
1943 * thereby have access to all resources added to that VSIG.
1946 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
/* NOTE(review): the 'vsig' parameter line, opening brace, and the final
 * 'return status;' are elided from this view.
 */
1949 enum ice_status status;
1951 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1952 return ICE_ERR_PARAM;
/* The add is performed under the block's profile-list lock */
1954 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1955 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1957 ice_release_lock(&hw->fl_profs_locks[blk]);
1963 * ice_flow_assoc_prof - associate a VSI with a flow profile
1964 * @hw: pointer to the hardware structure
1965 * @blk: classification stage
1966 * @prof: pointer to flow profile
1967 * @vsi_handle: software VSI handle
1969 * Assumption: the caller has acquired the lock to the profile list
1970 * and the software VSI handle has been validated
1972 static enum ice_status
1973 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1974 struct ice_flow_prof *prof, u16 vsi_handle)
/* NOTE(review): elided lines include the error check after
 * ice_flow_acl_set_xtrct_seq, the continuation of the
 * ice_add_prof_id_flow() argument list, the success/failure branch
 * structure, and the final return.
 */
1976 enum ice_status status = ICE_SUCCESS;
/* no-op if the VSI is already associated with this profile */
1978 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1979 if (blk == ICE_BLK_ACL) {
1980 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1984 status = ice_add_prof_id_flow(hw, blk,
1985 ice_get_hw_vsi_num(hw,
1989 ice_set_bit(vsi_handle, prof->vsis);
1991 ice_debug(hw, ICE_DBG_FLOW,
1992 "HW profile add failed, %d\n",
2000 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2001 * @hw: pointer to the hardware structure
2002 * @blk: classification stage
2003 * @prof: pointer to flow profile
2004 * @vsi_handle: software VSI handle
2006 * Assumption: the caller has acquired the lock to the profile list
2007 * and the software VSI handle has been validated
2009 static enum ice_status
2010 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2011 struct ice_flow_prof *prof, u16 vsi_handle)
2013 enum ice_status status = ICE_SUCCESS;
/* Nothing to do unless the VSI is currently associated */
2015 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2016 status = ice_rem_prof_id_flow(hw, blk,
2017 ice_get_hw_vsi_num(hw,
/* Clear the SW tracking bit only after HW removal succeeds */
2021 ice_clear_bit(vsi_handle, prof->vsis);
2023 ice_debug(hw, ICE_DBG_FLOW,
2024 "HW profile remove failed, %d\n",
2032 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2033 * @hw: pointer to the HW struct
2034 * @blk: classification stage
2035 * @dir: flow direction
2036 * @prof_id: unique ID to identify this flow profile
2037 * @segs: array of one or more packet segments that describe the flow
2038 * @segs_cnt: number of packet segments provided
2039 * @acts: array of default actions
2040 * @acts_cnt: number of default actions
2041 * @prof: stores the returned flow profile added
2044 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2045 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2046 struct ice_flow_action *acts, u8 acts_cnt,
2047 struct ice_flow_prof **prof)
2049 enum ice_status status;
/* Reject parameter sets that can never be programmed */
2051 if (segs_cnt > ICE_FLOW_SEG_MAX)
2052 return ICE_ERR_MAX_LIMIT;
2055 return ICE_ERR_PARAM;
2058 return ICE_ERR_BAD_PTR;
/* Verify the segments describe a supported protocol header stack */
2060 status = ice_flow_val_hdrs(segs, segs_cnt);
2064 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2066 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2067 acts, acts_cnt, prof);
/* Track the new profile in the per-block profile list */
2069 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2071 ice_release_lock(&hw->fl_profs_locks[blk]);
2077 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2078 * @hw: pointer to the HW struct
2079 * @blk: the block for which the flow profile is to be removed
2080 * @prof_id: unique ID of the flow profile to be removed
2083 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2085 struct ice_flow_prof *prof;
2086 enum ice_status status;
2088 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Look up the profile by its ID; fail if it was never added */
2090 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2092 status = ICE_ERR_DOES_NOT_EXIST;
2096 /* prof becomes invalid after the call */
2097 status = ice_flow_rem_prof_sync(hw, blk, prof);
2100 ice_release_lock(&hw->fl_profs_locks[blk]);
2106 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2107 * @hw: pointer to the HW struct
2108 * @blk: classification stage
2109 * @prof_id: the profile ID handle
2110 * @hw_prof_id: pointer to variable to receive the HW profile ID
2113 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2116 struct ice_prof_map *map;
/* Translate the caller's profile ID handle to the HW profile ID via
 * the SW profile map; report absence rather than returning garbage.
 */
2118 map = ice_search_prof_id(hw, blk, prof_id);
2120 *hw_prof_id = map->prof_id;
2124 return ICE_ERR_DOES_NOT_EXIST;
2128 * ice_flow_find_entry - look for a flow entry using its unique ID
2129 * @hw: pointer to the HW struct
2130 * @blk: classification stage
2131 * @entry_id: unique ID to identify this flow entry
2133 * This function looks for the flow entry with the specified unique ID in all
2134 * flow profiles of the specified classification stage. If the entry is found,
2135 * flow profiles of the specified classification stage. If the entry is found,
2136 * it returns the handle to the flow entry. Otherwise, it returns
2138 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2140 struct ice_flow_entry *found = NULL;
2141 struct ice_flow_prof *p;
2143 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Walk every profile of this block; each profile guards its own entry
 * list with a separate lock, taken per-profile below.
 */
2145 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2146 struct ice_flow_entry *e;
2148 ice_acquire_lock(&p->entries_lock);
2149 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2150 if (e->id == entry_id) {
2154 ice_release_lock(&p->entries_lock);
2160 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Convert the matched entry pointer to an opaque handle, or report
 * "not found" with the invalid-handle sentinel.
 */
2162 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2166 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2167 * @hw: pointer to the hardware structure
2168 * @acts: array of actions to be performed on a match
2169 * @acts_cnt: number of actions
2170 * @cnt_alloc: indicates if an ACL counter has been allocated.
2172 static enum ice_status
2173 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2174 u8 acts_cnt, bool *cnt_alloc)
/* Bitmap indexed by action type, used to detect duplicate actions */
2176 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2179 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2182 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2183 return ICE_ERR_OUT_OF_RANGE;
/* First pass: only a small set of action types is valid for ACL */
2185 for (i = 0; i < acts_cnt; i++) {
2186 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2187 acts[i].type != ICE_FLOW_ACT_DROP &&
2188 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2189 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2192 /* If the caller want to add two actions of the same type, then
2193 * it is considered invalid configuration.
2195 if (ice_test_and_set_bit(acts[i].type, dup_check))
2196 return ICE_ERR_PARAM;
2199 /* Checks if ACL counters are needed. */
2200 for (i = 0; i < acts_cnt; i++) {
2201 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2202 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2203 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2204 struct ice_acl_cntrs cntrs;
2205 enum ice_status status;
2208 cntrs.bank = 0; /* Only bank0 for the moment */
/* Dual counters track packets and bytes together */
2210 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2211 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2213 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2215 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2218 /* Counter index within the bank */
2219 acts[i].data.acl_act.value =
2220 CPU_TO_LE16(cntrs.first_cntr);
2229 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2230 * @fld: number of the given field
2231 * @info: info about field
2232 * @range_buf: range checker configuration buffer
2233 * @data: pointer to a data buffer containing flow entry's match values/masks
2234 * @range: Input/output param indicating which range checkers are being used
2237 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2238 struct ice_aqc_acl_profile_ranges *range_buf,
2239 u8 *data, u8 *range)
2243 /* If not specified, default mask is all bits in field */
2244 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2245 BIT(ice_flds_info[fld].size) - 1 :
2246 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2248 /* If the mask is 0, then we don't need to worry about this input
2249 * range checker value.
/* Low/high bounds come from the entry's "val" and "last" locations in
 * the caller-provided data buffer, shifted by the extraction
 * displacement to line up with the extracted word.
 */
2253 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2255 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2256 u8 range_idx = info->entry.val;
/* Range checker registers are big-endian in the AQ buffer */
2258 range_buf->checker_cfg[range_idx].low_boundary =
2259 CPU_TO_BE16(new_low);
2260 range_buf->checker_cfg[range_idx].high_boundary =
2261 CPU_TO_BE16(new_high);
2262 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2264 /* Indicate which range checker is being used */
2265 *range |= BIT(range_idx);
2270 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2271 * @fld: number of the given field
2272 * @info: info about the field
2273 * @buf: buffer containing the entry
2274 * @dontcare: buffer containing don't care mask for entry
2275 * @data: pointer to a data buffer containing flow entry's match values/masks
2278 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2279 u8 *dontcare, u8 *data)
2281 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2282 bool use_mask = false;
2285 src = info->src.val;
2286 mask = info->src.mask;
/* Destination index is relative to the start of the byte selection
 * area in the scenario buffer.
 */
2287 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* Sub-byte displacement of the extracted field */
2288 disp = info->xtrct.disp % BITS_PER_BYTE;
2290 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, shifting each source byte by the
 * displacement and carrying the overflow bits into the next byte.
 */
2293 for (k = 0; k < info->entry.last; k++, dst++) {
2294 /* Add overflow bits from previous byte */
2295 buf[dst] = (tmp_s & 0xff00) >> 8;
2297 /* If mask is not valid, tmp_m is always zero, so just setting
2298 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2299 * overflow bits of mask from prev byte
2301 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2303 /* If there is displacement, last byte will only contain
2304 * displaced data, but there is no more data to read from user
2305 * buffer, so skip so as not to potentially read beyond end of
2308 if (!disp || k < info->entry.last - 1) {
2309 /* Store shifted data to use in next byte */
2310 tmp_s = data[src++] << disp;
2312 /* Add current (shifted) byte */
2313 buf[dst] |= tmp_s & 0xff;
2315 /* Handle mask if valid */
/* Don't-care is the inverse of the user mask, shifted the same way */
2317 tmp_m = (~data[mask++] & 0xff) << disp;
2318 dontcare[dst] |= tmp_m & 0xff;
2323 /* Fill in don't care bits at beginning of field */
2325 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2326 for (k = 0; k < disp; k++)
2327 dontcare[dst] |= BIT(k);
/* Bit position right after the field ends within its last byte */
2330 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2332 /* Fill in don't care bits at end of field */
2334 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2335 info->entry.last - 1;
2336 for (k = end_disp; k < BITS_PER_BYTE; k++)
2337 dontcare[dst] |= BIT(k);
2342 * ice_flow_acl_frmt_entry - Format ACL entry
2343 * @hw: pointer to the hardware structure
2344 * @prof: pointer to flow profile
2345 * @e: pointer to the flow entry
2346 * @data: pointer to a data buffer containing flow entry's match values/masks
2347 * @acts: array of actions to be performed on a match
2348 * @acts_cnt: number of actions
2350 * Formats the key (and key_inverse) to be matched from the data passed in,
2351 * along with data from the flow profile. This key/key_inverse pair makes up
2352 * the 'entry' for an ACL flow entry.
2354 static enum ice_status
2355 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2356 struct ice_flow_entry *e, u8 *data,
2357 struct ice_flow_action *acts, u8 acts_cnt)
2359 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2360 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2361 enum ice_status status;
/* Resolve the HW profile ID; the profile ID byte is embedded in the
 * match key further below.
 */
2366 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2370 /* Format the result action */
2372 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2376 status = ICE_ERR_NO_MEMORY;
/* Keep a private copy of the actions on the entry */
2378 e->acts = (struct ice_flow_action *)
2379 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2380 ICE_NONDMA_TO_NONDMA);
2385 e->acts_cnt = acts_cnt;
2387 /* Format the matching data */
2388 buf_sz = prof->cfg.scen->width;
2389 buf = (u8 *)ice_malloc(hw, buf_sz);
2393 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2397 /* 'key' buffer will store both key and key_inverse, so must be twice
2400 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2404 range_buf = (struct ice_aqc_acl_profile_ranges *)
2405 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges))
2409 /* Set don't care mask to all 1's to start, will zero out used bytes */
2410 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Walk every matched field of every segment and render it into the
 * key buffer; range-type fields go to the range checkers instead.
 */
2412 for (i = 0; i < prof->segs_cnt; i++) {
2413 struct ice_flow_seg_info *seg = &prof->segs[i];
2414 u64 match = seg->match;
2417 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2418 struct ice_flow_fld_info *info;
2419 const u64 bit = BIT_ULL(j);
2424 info = &seg->fields[j];
2426 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2427 ice_flow_acl_frmt_entry_range(j, info,
2431 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) fields are copied byte-for-byte; no sub-byte
 * displacement handling is needed here.
 */
2437 for (j = 0; j < seg->raws_cnt; j++) {
2438 struct ice_flow_fld_info *info = &seg->raws[j].info;
2439 u16 dst, src, mask, k;
2440 bool use_mask = false;
2442 src = info->src.val;
2443 dst = info->entry.val -
2444 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2445 mask = info->src.mask;
2447 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2450 for (k = 0; k < info->entry.last; k++, dst++) {
2451 buf[dst] = data[src++];
2453 dontcare[dst] = ~data[mask++];
/* Embed the (exact-match) profile ID byte into the key */
2460 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2461 dontcare[prof->cfg.scen->pid_idx] = 0;
2463 /* Format the buffer for direction flags */
2464 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2466 if (prof->dir == ICE_FLOW_RX)
2467 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* Record which range checkers this entry consumes */
2470 buf[prof->cfg.scen->rng_chk_idx] = range;
2471 /* Mark any unused range checkers as don't care */
2472 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2473 e->range_buf = range_buf;
2475 ice_free(hw, range_buf);
/* Derive the key/key_inverse pair from value + don't-care masks */
2478 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2484 e->entry_sz = buf_sz * 2;
2491 ice_free(hw, dontcare);
/* Error unwind: release whatever was allocated before failing */
2496 if (status && range_buf) {
2497 ice_free(hw, range_buf);
2498 e->range_buf = NULL;
2501 if (status && e->acts) {
2502 ice_free(hw, e->acts);
2507 if (status && cnt_alloc)
2508 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2514 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2515 * the compared data.
2516 * @prof: pointer to flow profile
2517 * @e: pointer to the comparing flow entry
2518 * @do_chg_action: decide if we want to change the ACL action
2519 * @do_add_entry: decide if we want to add the new ACL entry
2520 * @do_rem_entry: decide if we want to remove the current ACL entry
2522 * Find an ACL scenario entry that matches the compared data. In the same time,
2523 * this function also figure out:
2524 * a/ If we want to change the ACL action
2525 * b/ If we want to add the new ACL entry
2526 * c/ If we want to remove the current ACL entry
2528 static struct ice_flow_entry *
2529 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2530 struct ice_flow_entry *e, bool *do_chg_action,
2531 bool *do_add_entry, bool *do_rem_entry)
2533 struct ice_flow_entry *p, *return_entry = NULL;
2537 * a/ There exists an entry with same matching data, but different
2538 * priority, then we remove this existing ACL entry. Then, we
2539 * will add the new entry to the ACL scenario.
2540 * b/ There exists an entry with same matching data, priority, and
2541 * result action, then we do nothing
2542 * c/ There exists an entry with same matching data and priority, but
2543 * a different action, then only change the entry's action.
2544 * d/ Else, we add this new entry to the ACL scenario.
/* Defaults correspond to case d/ (brand-new entry) */
2546 *do_chg_action = false;
2547 *do_add_entry = true;
2548 *do_rem_entry = false;
2549 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2550 if (memcmp(p->entry, e->entry, p->entry_sz))
2553 /* From this point, we have the same matching_data. */
2554 *do_add_entry = false;
2557 if (p->priority != e->priority) {
2558 /* matching data && !priority */
2559 *do_add_entry = true;
2560 *do_rem_entry = true;
2564 /* From this point, we will have matching_data && priority */
2565 if (p->acts_cnt != e->acts_cnt)
2566 *do_chg_action = true;
/* Compare action lists pairwise; any unmatched action in 'p'
 * means the result action differs and must be reprogrammed.
 */
2567 for (i = 0; i < p->acts_cnt; i++) {
2568 bool found_not_match = false;
2570 for (j = 0; j < e->acts_cnt; j++)
2571 if (memcmp(&p->acts[i], &e->acts[j],
2572 sizeof(struct ice_flow_action))) {
2573 found_not_match = true;
2577 if (found_not_match) {
2578 *do_chg_action = true;
2583 /* (do_chg_action = true) means :
2584 * matching_data && priority && !result_action
2585 * (do_chg_action = false) means :
2586 * matching_data && priority && result_action
2591 return return_entry;
2595 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2598 static enum ice_acl_entry_prior
2599 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2601 enum ice_acl_entry_prior acl_prior;
/* 1:1 mapping between the flow-level and ACL-level priority enums */
2604 case ICE_FLOW_PRIO_LOW:
2605 acl_prior = ICE_LOW;
2607 case ICE_FLOW_PRIO_NORMAL:
2608 acl_prior = ICE_NORMAL;
2610 case ICE_FLOW_PRIO_HIGH:
2611 acl_prior = ICE_HIGH;
/* Unknown values fall back to normal priority */
2614 acl_prior = ICE_NORMAL;
2622 * ice_flow_acl_union_rng_chk - Perform union operation between two
2623 * range checker buffers
2624 * @dst_buf: pointer to destination range checker buffer
2625 * @src_buf: pointer to source range checker buffer
2627 * For this function, we do the union between dst_buf and src_buf
2628 * range checker buffer, and we will save the result back to dst_buf
2630 static enum ice_status
2631 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2632 struct ice_aqc_acl_profile_ranges *src_buf)
2636 if (!dst_buf || !src_buf)
2637 return ICE_ERR_BAD_PTR;
/* For each source range checker, find a destination slot that is
 * either free (mask == 0) or already holds identical data.
 */
2639 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2640 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2641 bool will_populate = false;
2643 in_data = &src_buf->checker_cfg[i];
2648 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2649 cfg_data = &dst_buf->checker_cfg[j];
2651 if (!cfg_data->mask ||
2652 !memcmp(cfg_data, in_data,
2653 sizeof(struct ice_acl_rng_data))) {
2654 will_populate = true;
2659 if (will_populate) {
2660 ice_memcpy(cfg_data, in_data,
2661 sizeof(struct ice_acl_rng_data),
2662 ICE_NONDMA_TO_NONDMA);
2664 /* No available slot left to program range checker */
2665 return ICE_ERR_MAX_LIMIT;
2673 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2674 * @hw: pointer to the hardware structure
2675 * @prof: pointer to flow profile
2676 * @entry: double pointer to the flow entry
2678 * For this function, we will look at the current added entries in the
2679 * corresponding ACL scenario. Then, we will perform matching logic to
2680 * see if we want to add/modify/do nothing with this new entry.
2682 static enum ice_status
2683 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2684 struct ice_flow_entry **entry)
2686 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2687 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2688 struct ice_acl_act_entry *acts = NULL;
2689 struct ice_flow_entry *exist;
2690 enum ice_status status = ICE_SUCCESS;
2691 struct ice_flow_entry *e;
2694 if (!entry || !(*entry) || !prof)
2695 return ICE_ERR_BAD_PTR;
2699 do_chg_rng_chk = false;
2703 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2708 /* Query the current range-checker value in FW */
2709 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
/* Work on a copy so the FW snapshot can be compared afterwards */
2713 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2714 sizeof(struct ice_aqc_acl_profile_ranges),
2715 ICE_NONDMA_TO_NONDMA);
2717 /* Generate the new range-checker value */
2718 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2722 /* Reconfigure the range check if the buffer is changed. */
2723 do_chg_rng_chk = false;
2724 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2725 sizeof(struct ice_aqc_acl_profile_ranges))) {
2726 status = ice_prog_acl_prof_ranges(hw, prof_id,
2727 &cfg_rng_buf, NULL);
2731 do_chg_rng_chk = true;
2735 /* Figure out if we want to (change the ACL action) and/or
2736 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2738 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2739 &do_add_entry, &do_rem_entry);
/* Same match data but different priority: drop the stale entry */
2742 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2747 /* Prepare the result action buffer */
2748 acts = (struct ice_acl_act_entry *)ice_calloc
2749 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2750 for (i = 0; i < e->acts_cnt; i++)
2751 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2752 sizeof(struct ice_acl_act_entry),
2753 ICE_NONDMA_TO_NONDMA);
2756 enum ice_acl_entry_prior prior;
/* The entry buffer holds key then key_inverse back-to-back */
2760 keys = (u8 *)e->entry;
2761 inverts = keys + (e->entry_sz / 2);
2762 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2764 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2765 inverts, acts, e->acts_cnt,
/* Remember where in the scenario this entry landed */
2770 e->scen_entry_idx = entry_idx;
2771 LIST_ADD(&e->l_entry, &prof->entries);
2773 if (do_chg_action) {
2774 /* For the action memory info, update the SW's copy of
2775 * exist entry with e's action memory info
2777 ice_free(hw, exist->acts);
2778 exist->acts_cnt = e->acts_cnt;
2779 exist->acts = (struct ice_flow_action *)
2780 ice_calloc(hw, exist->acts_cnt,
2781 sizeof(struct ice_flow_action));
2784 status = ICE_ERR_NO_MEMORY;
2788 ice_memcpy(exist->acts, e->acts,
2789 sizeof(struct ice_flow_action) * e->acts_cnt,
2790 ICE_NONDMA_TO_NONDMA);
/* Reprogram the HW action memory at the existing slot */
2792 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2794 exist->scen_entry_idx);
2799 if (do_chg_rng_chk) {
2800 /* In this case, we want to update the range checker
2801 * information of the exist entry
2803 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2809 /* As we don't add the new entry to our SW DB, deallocate its
2810 * memories, and return the exist entry to the caller
2812 ice_dealloc_flow_entry(hw, e);
2823 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2824 * @hw: pointer to the hardware structure
2825 * @prof: pointer to flow profile
2826 * @e: double pointer to the flow entry
2828 static enum ice_status
2829 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2830 struct ice_flow_entry **e)
2832 enum ice_status status;
/* Thin locking wrapper: the real work happens in the _sync variant,
 * performed under the profile's entry-list lock.
 */
2834 ice_acquire_lock(&prof->entries_lock);
2835 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2836 ice_release_lock(&prof->entries_lock);
2842 * ice_flow_add_entry - Add a flow entry
2843 * @hw: pointer to the HW struct
2844 * @blk: classification stage
2845 * @prof_id: ID of the profile to add a new flow entry to
2846 * @entry_id: unique ID to identify this flow entry
2847 * @vsi_handle: software VSI handle for the flow entry
2848 * @prio: priority of the flow entry
2849 * @data: pointer to a data buffer containing flow entry's match values/masks
2850 * @acts: arrays of actions to be performed on a match
2851 * @acts_cnt: number of actions
2852 * @entry_h: pointer to buffer that receives the new flow entry's handle
2855 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2856 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2857 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2860 struct ice_flow_entry *e = NULL;
2861 struct ice_flow_prof *prof;
2862 enum ice_status status = ICE_SUCCESS;
2864 /* ACL entries must indicate an action */
2865 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2866 return ICE_ERR_PARAM;
2868 /* No flow entry data is expected for RSS */
2869 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2870 return ICE_ERR_BAD_PTR;
2872 if (!ice_is_vsi_valid(hw, vsi_handle))
2873 return ICE_ERR_PARAM;
2875 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* The target profile must already exist for this block */
2877 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2879 status = ICE_ERR_DOES_NOT_EXIST;
2881 /* Allocate memory for the entry being added and associate
2882 * the VSI to the found flow profile
2884 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2886 status = ICE_ERR_NO_MEMORY;
2888 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2891 ice_release_lock(&hw->fl_profs_locks[blk]);
2896 e->vsi_handle = vsi_handle;
2905 /* ACL will handle the entry management */
2906 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2911 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Blocks without a handler here are not implemented */
2917 status = ICE_ERR_NOT_IMPL;
2921 if (blk != ICE_BLK_ACL) {
2922 /* ACL will handle the entry management */
2923 ice_acquire_lock(&prof->entries_lock);
2924 LIST_ADD(&e->l_entry, &prof->entries);
2925 ice_release_lock(&prof->entries_lock);
/* Hand the opaque entry handle back to the caller */
2928 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2933 ice_free(hw, e->entry);
2941 * ice_flow_rem_entry - Remove a flow entry
2942 * @hw: pointer to the HW struct
2943 * @blk: classification stage
2944 * @entry_h: handle to the flow entry to be removed
2946 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2949 struct ice_flow_entry *entry;
2950 struct ice_flow_prof *prof;
2951 enum ice_status status = ICE_SUCCESS;
2953 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2954 return ICE_ERR_PARAM;
/* The handle is an encoded entry pointer; decode it back */
2956 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2958 /* Retain the pointer to the flow profile as the entry will be freed */
2962 ice_acquire_lock(&prof->entries_lock);
2963 status = ice_flow_rem_entry_sync(hw, blk, entry);
2964 ice_release_lock(&prof->entries_lock);
2971 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2972 * @seg: packet segment the field being set belongs to
2973 * @fld: field to be set
2974 * @field_type: type of the field
2975 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2976 * entry's input buffer
2977 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2979 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2980 * entry's input buffer
2982 * This helper function stores information of a field being matched, including
2983 * the type of the field and the locations of the value to match, the mask, and
2984 * the upper-bound value in the start of the input buffer for a flow entry.
2985 * This function should only be used for fixed-size data structures.
2987 * This function also opportunistically determines the protocol headers to be
2988 * present based on the fields being set. Some fields cannot be used alone to
2989 * determine the protocol headers present. Sometimes, fields for particular
2990 * protocol headers are not matched. In those cases, the protocol headers
2991 * must be explicitly set.
2994 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2995 enum ice_flow_fld_match_type field_type, u16 val_loc,
2996 u16 mask_loc, u16 last_loc)
2998 u64 bit = BIT_ULL(fld);
3001 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record where in the entry's input buffer the value, mask, and
 * last/upper-bound for this field are located.
 */
3004 seg->fields[fld].type = field_type;
3005 seg->fields[fld].src.val = val_loc;
3006 seg->fields[fld].src.mask = mask_loc;
3007 seg->fields[fld].src.last = last_loc;
/* Opportunistically mark the protocol header this field implies */
3009 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3013 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3014 * @seg: packet segment the field being set belongs to
3015 * @fld: field to be set
3016 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3017 * entry's input buffer
3018 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3020 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3021 * entry's input buffer
3022 * @range: indicate if field being matched is to be in a range
3024 * This function specifies the locations, in the form of byte offsets from the
3025 * start of the input buffer for a flow entry, from where the value to match,
3026 * the mask value, and upper value can be extracted. These locations are then
3027 * stored in the flow profile. When adding a flow entry associated with the
3028 * flow profile, these locations will be used to quickly extract the values and
3029 * create the content of a match entry. This function should only be used for
3030 * fixed-size data structures.
3033 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3034 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Convenience wrapper: select regular vs range match type, then
 * delegate to the extended setter.
 */
3036 enum ice_flow_fld_match_type t = range ?
3037 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3039 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3043 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3044 * @seg: packet segment the field being set belongs to
3045 * @fld: field to be set
3046 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3047 * entry's input buffer
3048 * @pref_loc: location of prefix value from entry's input buffer
3049 * @pref_sz: size of the location holding the prefix value
3051 * This function specifies the locations, in the form of byte offsets from the
3052 * start of the input buffer for a flow entry, from where the value to match
3053 * and the IPv4 prefix value can be extracted. These locations are then stored
3054 * in the flow profile. When adding flow entries to the associated flow profile,
3055 * these locations can be used to quickly extract the values to create the
3056 * content of a match entry. This function should only be used for fixed-size
3060 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3061 u16 val_loc, u16 pref_loc, u8 pref_sz)
3063 /* For this type of field, the "mask" location is for the prefix value's
3064 * location and the "last" location is for the size of the location of
3067 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3068 pref_loc, (u16)pref_sz);
3072 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3073 * @seg: packet segment the field being set belongs to
3074 * @off: offset of the raw field from the beginning of the segment in bytes
3075 * @len: length of the raw pattern to be matched
3076 * @val_loc: location of the value to match from entry's input buffer
3077 * @mask_loc: location of mask value from entry's input buffer
3079 * This function specifies the offset of the raw field to be match from the
3080 * beginning of the specified packet segment, and the locations, in the form of
3081 * byte offsets from the start of the input buffer for a flow entry, from where
3082 * the value to match and the mask value to be extracted. These locations are
3083 * then stored in the flow profile. When adding flow entries to the associated
3084 * flow profile, these locations can be used to quickly extract the values to
3085 * create the content of a match entry. This function should only be used for
3086 * fixed-size data structures.
3089 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3090 u16 val_loc, u16 mask_loc)
/* Only record the raw field while there is room in the segment's
 * fixed-size raws array; overflow is reported later (see note below).
 */
3092 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3093 seg->raws[seg->raws_cnt].off = off;
3094 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3095 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3096 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3097 /* The "last" field is used to store the length of the field */
3098 seg->raws[seg->raws_cnt].info.src.last = len;
3101 /* Overflows of "raws" will be handled as an error condition later in
3102 * the flow when this information is processed.
3107 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3108 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3110 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3111 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3113 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3114 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3115 ICE_FLOW_SEG_HDR_SCTP)
3117 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3118 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3119 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3120 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3123 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3124 * @segs: pointer to the flow field segment(s)
3125 * @hash_fields: fields to be hashed on for the segment(s)
3126 * @flow_hdr: protocol header fields within a packet segment
3128 * Helper function to extract fields from hash bitmap and use flow
3129 * header value to set flow field segment for further use in flow
3130 * profile entry or removal.
3132 static enum ice_status
3133 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3136 u64 val = hash_fields;
/* For each bit set in the hash-field bitmap, register that field in
 * the segment with no input-buffer locations (RSS needs none).
 */
3139 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3140 u64 bit = BIT_ULL(i);
3143 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3144 ICE_FLOW_FLD_OFF_INVAL,
3145 ICE_FLOW_FLD_OFF_INVAL,
3146 ICE_FLOW_FLD_OFF_INVAL, false);
3150 ICE_FLOW_SET_HDRS(segs, flow_hdr);
/* Reject header bits outside the RSS-supported set */
3152 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3153 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3154 return ICE_ERR_PARAM;
/* At most one L3 header type may be selected (power-of-2 check) */
3156 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3157 if (val && !ice_is_pow2(val))
/* Likewise at most one L4 header type */
3160 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3161 if (val && !ice_is_pow2(val))
3168 * ice_rem_vsi_rss_list - remove VSI from RSS list
3169 * @hw: pointer to the hardware structure
3170 * @vsi_handle: software VSI handle
3172 * Remove the VSI from all RSS configurations in the list.
3174 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3176 struct ice_rss_cfg *r, *tmp;
3178 if (LIST_EMPTY(&hw->rss_list_head))
3181 ice_acquire_lock(&hw->rss_locks);
/* Clear this VSI from every RSS config; delete configs that no
 * longer reference any VSI.
 */
3182 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3183 ice_rss_cfg, l_entry)
3184 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3185 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3186 LIST_DEL(&r->l_entry);
3189 ice_release_lock(&hw->rss_locks);
3193 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3194 * @hw: pointer to the hardware structure
3195 * @vsi_handle: software VSI handle
3197 * This function will iterate through all flow profiles and disassociate
3198 * the VSI from that profile. If the flow profile has no VSIs it will
3201 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3203 const enum ice_block blk = ICE_BLK_RSS;
3204 struct ice_flow_prof *p, *t;
3205 enum ice_status status = ICE_SUCCESS;
3207 if (!ice_is_vsi_valid(hw, vsi_handle))
3208 return ICE_ERR_PARAM;
3210 if (LIST_EMPTY(&hw->fl_profs[blk]))
3213 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3214 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3216 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3217 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3221 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3222 status = ice_flow_rem_prof_sync(hw, blk, p);
3227 ice_release_lock(&hw->fl_profs_locks[blk]);
3233 * ice_rem_rss_list - remove RSS configuration from list
3234 * @hw: pointer to the hardware structure
3235 * @vsi_handle: software VSI handle
3236 * @prof: pointer to flow profile
3238 * Assumption: lock has already been acquired for RSS list
3241 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3243 struct ice_rss_cfg *r, *tmp;
3245 /* Search for RSS hash fields associated to the VSI that match the
3246 * hash configurations associated to the flow profile. If found
3247 * remove from the RSS entry list of the VSI context and delete entry.
3249 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3250 ice_rss_cfg, l_entry)
3251 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3252 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3253 ice_clear_bit(vsi_handle, r->vsis);
3254 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3255 LIST_DEL(&r->l_entry);
3263 * ice_add_rss_list - add RSS configuration to list
3264 * @hw: pointer to the hardware structure
3265 * @vsi_handle: software VSI handle
3266 * @prof: pointer to flow profile
3268 * Assumption: lock has already been acquired for RSS list
3270 static enum ice_status
3271 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3273 struct ice_rss_cfg *r, *rss_cfg;
3275 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3276 ice_rss_cfg, l_entry)
3277 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3278 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3279 ice_set_bit(vsi_handle, r->vsis);
3283 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3285 return ICE_ERR_NO_MEMORY;
3287 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3288 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3289 rss_cfg->symm = prof->cfg.symm;
3290 ice_set_bit(vsi_handle, rss_cfg->vsis);
3292 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 *
 * NOTE(review): ICE_FLOW_PROF_HDR_M (0x3FFFFFFF) covers only bits 32..61 of
 * the [32:62] range described above — confirm against the original header
 * layout before relying on bit 62.
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3318 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3320 u32 s = ((src % 4) << 3); /* byte shift */
3321 u32 v = dst | 0x80; /* value to program */
3322 u8 i = src / 4; /* register index */
3325 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3326 reg = (reg & ~(0xff << s)) | (v << s);
3327 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3331 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3334 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3337 for (i = 0; i < len; i++) {
3338 ice_rss_config_xor_word(hw, prof_id,
3339 /* Yes, field vector in GLQF_HSYMM and
3340 * GLQF_HINSET is inversed!
3342 fv_last_word - (src + i),
3343 fv_last_word - (dst + i));
3344 ice_rss_config_xor_word(hw, prof_id,
3345 fv_last_word - (dst + i),
3346 fv_last_word - (src + i));
3351 ice_rss_update_symm(struct ice_hw *hw,
3352 struct ice_flow_prof *prof)
3354 struct ice_prof_map *map;
3357 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3358 prof_id = map->prof_id;
3360 /* clear to default */
3361 for (m = 0; m < 6; m++)
3362 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3363 if (prof->cfg.symm) {
3364 struct ice_flow_seg_info *seg =
3365 &prof->segs[prof->segs_cnt - 1];
3367 struct ice_flow_seg_xtrct *ipv4_src =
3368 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3369 struct ice_flow_seg_xtrct *ipv4_dst =
3370 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3371 struct ice_flow_seg_xtrct *ipv6_src =
3372 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3373 struct ice_flow_seg_xtrct *ipv6_dst =
3374 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3376 struct ice_flow_seg_xtrct *tcp_src =
3377 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3378 struct ice_flow_seg_xtrct *tcp_dst =
3379 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3381 struct ice_flow_seg_xtrct *udp_src =
3382 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3383 struct ice_flow_seg_xtrct *udp_dst =
3384 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3386 struct ice_flow_seg_xtrct *sctp_src =
3387 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3388 struct ice_flow_seg_xtrct *sctp_dst =
3389 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3392 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3393 ice_rss_config_xor(hw, prof_id,
3394 ipv4_src->idx, ipv4_dst->idx, 2);
3397 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3398 ice_rss_config_xor(hw, prof_id,
3399 ipv6_src->idx, ipv6_dst->idx, 8);
3402 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3403 ice_rss_config_xor(hw, prof_id,
3404 tcp_src->idx, tcp_dst->idx, 1);
3407 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3408 ice_rss_config_xor(hw, prof_id,
3409 udp_src->idx, udp_dst->idx, 1);
3412 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3413 ice_rss_config_xor(hw, prof_id,
3414 sctp_src->idx, sctp_dst->idx, 1);
3419 * ice_add_rss_cfg_sync - add an RSS configuration
3420 * @hw: pointer to the hardware structure
3421 * @vsi_handle: software VSI handle
3422 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3423 * @addl_hdrs: protocol header fields
3424 * @segs_cnt: packet segment count
3425 * @symm: symmetric hash enable/disable
3427 * Assumption: lock has already been acquired for RSS list
3429 static enum ice_status
3430 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3431 u32 addl_hdrs, u8 segs_cnt, bool symm)
3433 const enum ice_block blk = ICE_BLK_RSS;
3434 struct ice_flow_prof *prof = NULL;
3435 struct ice_flow_seg_info *segs;
3436 enum ice_status status;
3438 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3439 return ICE_ERR_PARAM;
3441 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3444 return ICE_ERR_NO_MEMORY;
3446 /* Construct the packet segment info from the hashed fields */
3447 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3452 /* Search for a flow profile that has matching headers, hash fields
3453 * and has the input VSI associated to it. If found, no further
3454 * operations required and exit.
3456 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3458 ICE_FLOW_FIND_PROF_CHK_FLDS |
3459 ICE_FLOW_FIND_PROF_CHK_VSI);
3461 if (prof->cfg.symm == symm)
3463 prof->cfg.symm = symm;
3467 /* Check if a flow profile exists with the same protocol headers and
3468 * associated with the input VSI. If so disassociate the VSI from
3469 * this profile. The VSI will be added to a new profile created with
3470 * the protocol header and new hash field configuration.
3472 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3473 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3475 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3477 ice_rem_rss_list(hw, vsi_handle, prof);
3481 /* Remove profile if it has no VSIs associated */
3482 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3483 status = ice_flow_rem_prof(hw, blk, prof->id);
3489 /* Search for a profile that has same match fields only. If this
3490 * exists then associate the VSI to this profile.
3492 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3494 ICE_FLOW_FIND_PROF_CHK_FLDS);
3496 if (prof->cfg.symm == symm) {
3497 status = ice_flow_assoc_prof(hw, blk, prof,
3500 status = ice_add_rss_list(hw, vsi_handle,
3503 /* if a profile exist but with different symmetric
3504 * requirement, just return error.
3506 status = ICE_ERR_NOT_SUPPORTED;
3511 /* Create a new flow profile with generated profile and packet
3512 * segment information.
3514 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3515 ICE_FLOW_GEN_PROFID(hashed_flds,
3516 segs[segs_cnt - 1].hdrs,
3518 segs, segs_cnt, NULL, 0, &prof);
3522 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3523 /* If association to a new flow profile failed then this profile can
3527 ice_flow_rem_prof(hw, blk, prof->id);
3531 status = ice_add_rss_list(hw, vsi_handle, prof);
3533 prof->cfg.symm = symm;
3536 ice_rss_update_symm(hw, prof);
3544 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3545 * @hw: pointer to the hardware structure
3546 * @vsi_handle: software VSI handle
3547 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3548 * @addl_hdrs: protocol header fields
3549 * @symm: symmetric hash enable/disable
3551 * This function will generate a flow profile based on fields associated with
3552 * the input fields to hash on, the flow type and use the VSI number to add
3553 * a flow entry to the profile.
3556 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3557 u32 addl_hdrs, bool symm)
3559 enum ice_status status;
3561 if (hashed_flds == ICE_HASH_INVALID ||
3562 !ice_is_vsi_valid(hw, vsi_handle))
3563 return ICE_ERR_PARAM;
3565 ice_acquire_lock(&hw->rss_locks);
3566 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3567 ICE_RSS_OUTER_HEADERS, symm);
3569 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3570 addl_hdrs, ICE_RSS_INNER_HEADERS,
3572 ice_release_lock(&hw->rss_locks);
3578 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3579 * @hw: pointer to the hardware structure
3580 * @vsi_handle: software VSI handle
3581 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3582 * @addl_hdrs: Protocol header fields within a packet segment
3583 * @segs_cnt: packet segment count
3585 * Assumption: lock has already been acquired for RSS list
3587 static enum ice_status
3588 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3589 u32 addl_hdrs, u8 segs_cnt)
3591 const enum ice_block blk = ICE_BLK_RSS;
3592 struct ice_flow_seg_info *segs;
3593 struct ice_flow_prof *prof;
3594 enum ice_status status;
3596 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3599 return ICE_ERR_NO_MEMORY;
3601 /* Construct the packet segment info from the hashed fields */
3602 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3607 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3609 ICE_FLOW_FIND_PROF_CHK_FLDS);
3611 status = ICE_ERR_DOES_NOT_EXIST;
3615 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3619 /* Remove RSS configuration from VSI context before deleting
3622 ice_rem_rss_list(hw, vsi_handle, prof);
3624 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3625 status = ice_flow_rem_prof(hw, blk, prof->id);
3633 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3634 * @hw: pointer to the hardware structure
3635 * @vsi_handle: software VSI handle
3636 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3637 * @addl_hdrs: Protocol header fields within a packet segment
3639 * This function will lookup the flow profile based on the input
3640 * hash field bitmap, iterate through the profile entry list of
3641 * that profile and find entry associated with input VSI to be
3642 * removed. Calls are made to underlying flow apis which will in
3643 * turn build or update buffers for RSS XLT1 section.
3646 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3649 enum ice_status status;
3651 if (hashed_flds == ICE_HASH_INVALID ||
3652 !ice_is_vsi_valid(hw, vsi_handle))
3653 return ICE_ERR_PARAM;
3655 ice_acquire_lock(&hw->rss_locks);
3656 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3657 ICE_RSS_OUTER_HEADERS);
3659 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3660 addl_hdrs, ICE_RSS_INNER_HEADERS);
3661 ice_release_lock(&hw->rss_locks);
3667 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3668 * @hw: pointer to the hardware structure
3669 * @vsi_handle: software VSI handle
3671 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3673 enum ice_status status = ICE_SUCCESS;
3674 struct ice_rss_cfg *r;
3676 if (!ice_is_vsi_valid(hw, vsi_handle))
3677 return ICE_ERR_PARAM;
3679 ice_acquire_lock(&hw->rss_locks);
3680 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3681 ice_rss_cfg, l_entry) {
3682 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3683 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3686 ICE_RSS_OUTER_HEADERS,
3690 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3693 ICE_RSS_INNER_HEADERS,
3699 ice_release_lock(&hw->rss_locks);
3705 * ice_get_rss_cfg - returns hashed fields for the given header types
3706 * @hw: pointer to the hardware structure
3707 * @vsi_handle: software VSI handle
3708 * @hdrs: protocol header type
3710 * This function will return the match fields of the first instance of flow
3711 * profile having the given header types and containing input VSI
3713 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3715 struct ice_rss_cfg *r, *rss_cfg = NULL;
3717 /* verify if the protocol header is non zero and VSI is valid */
3718 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3719 return ICE_HASH_INVALID;
3721 ice_acquire_lock(&hw->rss_locks);
3722 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3723 ice_rss_cfg, l_entry)
3724 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3725 r->packet_hdr == hdrs) {
3729 ice_release_lock(&hw->rss_locks);
3731 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;