1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_common.h"
/* Size of known protocol header fields, in bytes.  The ICE_FLOW_FLD_INFO
 * macros below convert these byte counts to bit counts when populating
 * ice_flds_info.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
#define ICE_FLOW_FLD_SZ_VLAN 2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IP_DSCP 1
#define ICE_FLOW_FLD_SZ_IP_TTL 1
#define ICE_FLOW_FLD_SZ_IP_PROT 1
#define ICE_FLOW_FLD_SZ_PORT 2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
#define ICE_FLOW_FLD_SZ_ARP_OPER 2
#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
#define ICE_FLOW_FLD_SZ_GTP_TEID 4
#define ICE_FLOW_FLD_SZ_GTP_QFI 2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
#define ICE_FLOW_FLD_SZ_ESP_SPI 4
#define ICE_FLOW_FLD_SZ_AH_SPI 4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* protocol header the field lives in */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field */
/* Convenience initializers for ice_flow_field_info entries: the offset and
 * size arguments are supplied in bytes and stored in bits.  The _MSK variant
 * additionally takes a 16-bit field mask.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
/* Table containing properties of supported protocol header fields.
 * Indexed by the ICE_FLOW_FIELD_IDX_* values noted on each entry; each
 * element records the owning header, bit offset, bit width, and an
 * optional 16-bit mask for one extractable field.
 */
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
/* Bitmaps indicating relevant packet types for a particular protocol header;
 * each u32 word carries 32 packet-type bits, and ice_flow_proc_seg_hdrs()
 * ANDs these into the profile's ptypes bitmap.
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x90842000, 0x00000007,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last MAC header
 * (all-zero: no packet types currently map to an inner MAC header)
 */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPU.
 * Each table tags the same set of GTPU packet types with a direction /
 * PDU-session attribute; ice_flow_proc_seg_hdrs() selects the table into
 * params->attr based on the segment's GTPU_EH/GTPU_DWN/GTPU_UP header bit.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
/* Packet types for GTPU (shared by the EH/uplink/downlink segment flavors) */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for l2tpv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for esp */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for ah */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for MAC-carried payloads that are not IP (ofos) */
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	u16 entry_length; /* # of bytes formatted entry will require */

	struct ice_flow_prof *prof;

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;

	/* NOTE(review): presumably per-word match masks paired with es[] -
	 * confirm against the users of this struct
	 */
	u16 mask[ICE_MAX_FV_WORDS];
	/* bitmap of packet types (PTYPEs) covered by this profile; built by
	 * ice_flow_proc_seg_hdrs() from the ice_ptypes_* tables above
	 */
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Protocol header bits treated as "inner" headers (name suggests RSS use) */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)
/* L2 header bits */
#define ICE_FLOW_SEG_HDRS_L2_MASK \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* L3 header bits - ice_flow_val_hdrs() allows at most one per segment */
#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	ICE_FLOW_SEG_HDR_ARP)
/* L4 header bits - ice_flow_val_hdrs() allows at most one per segment */
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	ICE_FLOW_SEG_HDR_SCTP)
/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
	for (i = 0; i < segs_cnt; i++) {
		/* Multiple L3 headers: the L3 bits of a segment must be a
		 * single bit (power of two), i.e. only one of IPv4/IPv6/ARP
		 */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
			return ICE_ERR_PARAM;

		/* Multiple L4 headers: same single-bit rule for
		 * ICMP/TCP/UDP/SCTP
		 */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
			return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes;
 * used by ice_flow_calc_seg_sz() to total a segment's header length
 */
#define ICE_FLOW_PROT_HDR_SZ_MAC 14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
#define ICE_FLOW_PROT_HDR_SZ_ARP 28
#define ICE_FLOW_PROT_HDR_SZ_ICMP 8
#define ICE_FLOW_PROT_HDR_SZ_TCP 20
#define ICE_FLOW_PROT_HDR_SZ_UDP 8
#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
	/* L2 headers: MAC, with or without a single VLAN tag */
	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;

	/* L3 headers (mutually exclusive per ice_flow_val_hdrs()) */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
		/* A L3 header is required if L4 is specified */

	/* L4 headers (mutually exclusive per ice_flow_val_hdrs()) */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
676 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
677 * @params: information about the flow to be processed
679 * This function identifies the packet types associated with the protocol
680 * headers being present in packet segments of the specified flow profile.
682 static enum ice_status
683 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
685 struct ice_flow_prof *prof;
688 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
693 for (i = 0; i < params->prof->segs_cnt; i++) {
694 const ice_bitmap_t *src;
697 hdrs = prof->segs[i].hdrs;
699 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
700 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
701 (const ice_bitmap_t *)ice_ptypes_mac_il;
702 ice_and_bitmap(params->ptypes, params->ptypes, src,
706 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
707 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
708 ice_and_bitmap(params->ptypes, params->ptypes, src,
712 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
713 ice_and_bitmap(params->ptypes, params->ptypes,
714 (const ice_bitmap_t *)ice_ptypes_arp_of,
718 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
719 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
720 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
721 ice_and_bitmap(params->ptypes, params->ptypes, src,
723 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
724 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
725 ice_and_bitmap(params->ptypes,
728 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
729 ice_and_bitmap(params->ptypes, params->ptypes,
730 (const ice_bitmap_t *)
733 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
734 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
735 ice_and_bitmap(params->ptypes, params->ptypes,
736 src, ICE_FLOW_PTYPE_MAX);
738 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
739 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
740 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
741 ice_and_bitmap(params->ptypes, params->ptypes, src,
743 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
744 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
745 ice_and_bitmap(params->ptypes,
748 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
749 ice_and_bitmap(params->ptypes, params->ptypes,
750 (const ice_bitmap_t *)
753 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
754 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
755 ice_and_bitmap(params->ptypes, params->ptypes,
756 src, ICE_FLOW_PTYPE_MAX);
760 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
761 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
762 ice_and_bitmap(params->ptypes, params->ptypes,
763 src, ICE_FLOW_PTYPE_MAX);
764 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
765 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
766 ice_and_bitmap(params->ptypes, params->ptypes, src,
770 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
771 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
772 (const ice_bitmap_t *)ice_ptypes_icmp_il;
773 ice_and_bitmap(params->ptypes, params->ptypes, src,
775 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
777 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
778 ice_and_bitmap(params->ptypes, params->ptypes,
779 src, ICE_FLOW_PTYPE_MAX);
781 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
782 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
783 ice_and_bitmap(params->ptypes, params->ptypes,
784 src, ICE_FLOW_PTYPE_MAX);
785 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
786 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
787 ice_and_bitmap(params->ptypes, params->ptypes,
788 src, ICE_FLOW_PTYPE_MAX);
789 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
790 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
791 ice_and_bitmap(params->ptypes, params->ptypes,
792 src, ICE_FLOW_PTYPE_MAX);
794 /* Attributes for GTP packet with downlink */
795 params->attr = ice_attr_gtpu_down;
796 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
797 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
798 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
799 ice_and_bitmap(params->ptypes, params->ptypes,
800 src, ICE_FLOW_PTYPE_MAX);
802 /* Attributes for GTP packet with uplink */
803 params->attr = ice_attr_gtpu_up;
804 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
805 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
806 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
807 ice_and_bitmap(params->ptypes, params->ptypes,
808 src, ICE_FLOW_PTYPE_MAX);
810 /* Attributes for GTP packet with Extension Header */
811 params->attr = ice_attr_gtpu_eh;
812 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
813 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
814 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
815 ice_and_bitmap(params->ptypes, params->ptypes,
816 src, ICE_FLOW_PTYPE_MAX);
817 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
818 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
819 ice_and_bitmap(params->ptypes, params->ptypes,
820 src, ICE_FLOW_PTYPE_MAX);
821 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
822 src = (const ice_bitmap_t *)ice_ptypes_esp;
823 ice_and_bitmap(params->ptypes, params->ptypes,
824 src, ICE_FLOW_PTYPE_MAX);
825 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
826 src = (const ice_bitmap_t *)ice_ptypes_ah;
827 ice_and_bitmap(params->ptypes, params->ptypes,
828 src, ICE_FLOW_PTYPE_MAX);
829 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
830 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
831 ice_and_bitmap(params->ptypes, params->ptypes,
832 src, ICE_FLOW_PTYPE_MAX);
835 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
836 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
838 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
841 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
843 ice_and_bitmap(params->ptypes, params->ptypes,
844 src, ICE_FLOW_PTYPE_MAX);
846 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
847 ice_andnot_bitmap(params->ptypes, params->ptypes,
848 src, ICE_FLOW_PTYPE_MAX);
850 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
851 ice_andnot_bitmap(params->ptypes, params->ptypes,
852 src, ICE_FLOW_PTYPE_MAX);
/* NOTE(review): gapped listing — the embedded original line numbers jump
 * (e.g. 873 -> 876), so some statements and braces were dropped during
 * extraction. Verify any change against the complete source file.
 */
860 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
861 * @hw: pointer to the HW struct
862 * @params: information about the flow to be processed
863 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
865 * This function will allocate an extraction sequence entries for a DWORD size
866 * chunk of the packet flags.
868 static enum ice_status
869 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
870 struct ice_flow_prof_params *params,
871 enum ice_flex_mdid_pkt_flags flags)
873 u8 fv_words = hw->blk[params->blk].es.fvw;
876 /* Make sure the number of extraction sequence entries required does not
877 * exceed the block's capacity.
879 if (params->es_cnt >= fv_words)
880 return ICE_ERR_MAX_LIMIT;
882 /* some blocks require a reversed field vector layout */
883 if (hw->blk[params->blk].es.reverse)
884 idx = fv_words - params->es_cnt - 1;
/* numbering gap: original line 885 (likely the "else") was dropped — confirm */
886 idx = params->es_cnt;
/* Metadata protocol ID with the flags value used directly as the FV offset */
888 params->es[idx].prot_id = ICE_PROT_META_ID;
889 params->es[idx].off = flags;
/* NOTE(review): function tail (es_cnt advance / return) not shown here */
/* NOTE(review): gapped listing — embedded original line numbers jump; the
 * switch's "break" lines, some braces, and the closing of the function were
 * dropped by extraction. Treat this as a partial view of the function.
 */
896 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
897 * @hw: pointer to the HW struct
898 * @params: information about the flow to be processed
899 * @seg: packet segment index of the field to be extracted
900 * @fld: ID of field to be extracted
901 * @match: bitfield of all fields
903 * This function determines the protocol ID, offset, and size of the given
904 * field. It then allocates one or more extraction sequence entries for the
905 * given field, and fill the entries with protocol ID and offset information.
907 static enum ice_status
908 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
909 u8 seg, enum ice_flow_field fld, u64 match)
911 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
912 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
913 u8 fv_words = hw->blk[params->blk].es.fvw;
914 struct ice_flow_fld_info *flds;
915 u16 cnt, ese_bits, i;
921 flds = params->prof->segs[seg].fields;
/* Map the flow field ID to a hardware protocol ID; outer (seg == 0) vs
 * inner header protocol IDs differ for MAC/ethertype/IPv4/IPv6.
 */
924 case ICE_FLOW_FIELD_IDX_ETH_DA:
925 case ICE_FLOW_FIELD_IDX_ETH_SA:
926 case ICE_FLOW_FIELD_IDX_S_VLAN:
927 case ICE_FLOW_FIELD_IDX_C_VLAN:
928 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
930 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
931 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
933 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
934 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
936 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
937 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
939 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
940 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
941 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
943 /* TTL and PROT share the same extraction seq. entry.
944 * Each is considered a sibling to the other in terms of sharing
945 * the same extraction sequence entry.
947 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
948 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
949 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
950 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
952 /* If the sibling field is also included, that field's
953 * mask needs to be included.
955 if (match & BIT(sib))
956 sib_mask = ice_flds_info[sib].mask;
958 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
959 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
960 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
962 /* TTL and PROT share the same extraction seq. entry.
963 * Each is considered a sibling to the other in terms of sharing
964 * the same extraction sequence entry.
966 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
967 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
968 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
969 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
971 /* If the sibling field is also included, that field's
972 * mask needs to be included.
974 if (match & BIT(sib))
975 sib_mask = ice_flds_info[sib].mask;
977 case ICE_FLOW_FIELD_IDX_IPV4_SA:
978 case ICE_FLOW_FIELD_IDX_IPV4_DA:
979 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
981 case ICE_FLOW_FIELD_IDX_IPV6_SA:
982 case ICE_FLOW_FIELD_IDX_IPV6_DA:
983 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
984 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
985 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
986 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
987 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
988 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
989 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
991 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
992 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
993 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
994 prot_id = ICE_PROT_TCP_IL;
996 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
997 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
998 prot_id = ICE_PROT_UDP_IL_OR_S;
1000 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1001 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1002 prot_id = ICE_PROT_SCTP_IL;
1004 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1005 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1006 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1007 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1008 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1009 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1010 /* GTP is accessed through UDP OF protocol */
1011 prot_id = ICE_PROT_UDP_OF;
1013 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1014 prot_id = ICE_PROT_PPPOE;
1016 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1017 prot_id = ICE_PROT_UDP_IL_OR_S;
1019 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1020 prot_id = ICE_PROT_L2TPV3;
1022 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1023 prot_id = ICE_PROT_ESP_F;
1025 case ICE_FLOW_FIELD_IDX_AH_SPI:
1026 prot_id = ICE_PROT_ESP_2;
1028 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1029 prot_id = ICE_PROT_UDP_IL_OR_S;
1031 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1032 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1033 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1034 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1035 case ICE_FLOW_FIELD_IDX_ARP_OP:
1036 prot_id = ICE_PROT_ARP_OF;
1038 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1039 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1040 /* ICMP type and code share the same extraction seq. entry */
1041 prot_id = (params->prof->segs[seg].hdrs &
1042 ICE_FLOW_SEG_HDR_IPV4) ?
1043 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1044 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1045 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1046 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1048 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1049 prot_id = ICE_PROT_GRE_OF;
/* default: unknown field ID — not implemented */
1052 return ICE_ERR_NOT_IMPL;
1055 /* Each extraction sequence entry is a word in size, and extracts a
1056 * word-aligned offset from a protocol header.
1058 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record word-aligned offset, in-word displacement, entry index and mask
 * for the field being extracted
 */
1060 flds[fld].xtrct.prot_id = prot_id;
1061 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1062 ICE_FLOW_FV_EXTRACT_SZ;
1063 flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1064 flds[fld].xtrct.idx = params->es_cnt;
1065 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1067 /* Adjust the next field-entry index after accommodating the number of
1068 * entries this field consumes
1070 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1071 ice_flds_info[fld].size, ese_bits);
1073 /* Fill in the extraction sequence entries needed for this field */
1074 off = flds[fld].xtrct.off;
1075 mask = flds[fld].xtrct.mask;
1076 for (i = 0; i < cnt; i++) {
1077 /* Only consume an extraction sequence entry if there is no
1078 * sibling field associated with this field or the sibling entry
1079 * already extracts the word shared with this field.
1081 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1082 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1083 flds[sib].xtrct.off != off) {
1086 /* Make sure the number of extraction sequence required
1087 * does not exceed the block's capability
1089 if (params->es_cnt >= fv_words)
1090 return ICE_ERR_MAX_LIMIT;
1092 /* some blocks require a reversed field vector layout */
1093 if (hw->blk[params->blk].es.reverse)
1094 idx = fv_words - params->es_cnt - 1;
1096 idx = params->es_cnt;
1098 params->es[idx].prot_id = prot_id;
1099 params->es[idx].off = off;
/* sibling's mask is OR'ed in so a shared entry matches both fields */
1100 params->mask[idx] = mask | sib_mask;
1104 off += ICE_FLOW_FV_EXTRACT_SZ;
/* NOTE(review): gapped listing — embedded original line numbers jump, so some
 * statements and braces are missing here; verify against the complete file.
 * Fix applied: "&params" had been HTML-entity-mangled to "&para;ms" on the
 * raw-pointer assignment below; restored. Also fixed "be be" doc typo.
 */
1111 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1112 * @hw: pointer to the HW struct
1113 * @params: information about the flow to be processed
1114 * @seg: index of packet segment whose raw fields are to be extracted
1116 static enum ice_status
1117 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* Nothing to do when the segment has no raw fields configured */
1124 if (!params->prof->segs[seg].raws_cnt)
1127 if (params->prof->segs[seg].raws_cnt >
1128 ARRAY_SIZE(params->prof->segs[seg].raws))
1129 return ICE_ERR_MAX_LIMIT;
1131 /* Offsets within the segment headers are not supported */
1132 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1134 return ICE_ERR_PARAM;
1136 fv_words = hw->blk[params->blk].es.fvw;
1138 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1139 struct ice_flow_seg_fld_raw *raw;
1142 raw = &params->prof->segs[seg].raws[i];
1144 /* Storing extraction information */
1145 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1146 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1147 ICE_FLOW_FV_EXTRACT_SZ;
1148 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1150 raw->info.xtrct.idx = params->es_cnt;
1152 /* Determine the number of field vector entries this raw field
1155 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1156 (raw->info.src.last * BITS_PER_BYTE),
1157 (ICE_FLOW_FV_EXTRACT_SZ *
1159 off = raw->info.xtrct.off;
1160 for (j = 0; j < cnt; j++) {
1163 /* Make sure the number of extraction sequence required
1164 * does not exceed the block's capability
1166 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1167 params->es_cnt >= ICE_MAX_FV_WORDS)
1168 return ICE_ERR_MAX_LIMIT;
1170 /* some blocks require a reversed field vector layout */
1171 if (hw->blk[params->blk].es.reverse)
1172 idx = fv_words - params->es_cnt - 1;
1174 idx = params->es_cnt;
1176 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1177 params->es[idx].off = off;
1179 off += ICE_FLOW_FV_EXTRACT_SZ;
/* NOTE(review): gapped listing — embedded original line numbers jump; error
 * checks and closing braces were dropped by extraction.
 */
1187 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1188 * @hw: pointer to the HW struct
1189 * @params: information about the flow to be processed
1191 * This function iterates through all matched fields in the given segments, and
1192 * creates an extraction sequence for the fields.
1194 static enum ice_status
1195 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1196 struct ice_flow_prof_params *params)
1198 enum ice_status status = ICE_SUCCESS;
1201 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1204 if (params->blk == ICE_BLK_ACL) {
1205 status = ice_flow_xtract_pkt_flags(hw, params,
1206 ICE_RX_MDID_PKT_FLAGS_15_0);
/* Walk every segment; "match" is consumed as a bitmask so the inner loop
 * can stop early once all matched fields have been handled
 */
1211 for (i = 0; i < params->prof->segs_cnt; i++) {
1212 u64 match = params->prof->segs[i].match;
1213 enum ice_flow_field j;
1215 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1216 const u64 bit = BIT_ULL(j);
1219 status = ice_flow_xtract_fld(hw, params, i, j,
1227 /* Process raw matching bytes */
1228 status = ice_flow_xtract_raws(hw, params, i);
/* NOTE(review): gapped listing — some lines (e.g. the "cand_scen = scen;"
 * assignment implied by the candidate test below) were dropped by extraction.
 */
1237 * ice_flow_sel_acl_scen - returns the specific scenario
1238 * @hw: pointer to the hardware structure
1239 * @params: information about the flow to be processed
1241 * This function will return the specific scenario based on the
1242 * params passed to it
1244 static enum ice_status
1245 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1247 /* Find the best-fit scenario for the provided match width */
1248 struct ice_acl_scen *cand_scen = NULL, *scen;
1251 return ICE_ERR_DOES_NOT_EXIST;
1253 /* Loop through each scenario and match against the scenario width
1254 * to select the specific scenario
/* best-fit = narrowest scenario still wide enough for entry_length */
1256 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1257 if (scen->eff_width >= params->entry_length &&
1258 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1261 return ICE_ERR_DOES_NOT_EXIST;
1263 params->prof->cfg.scen = cand_scen;
/* NOTE(review): gapped listing — embedded original line numbers jump, so some
 * statements/braces are missing; verify against the complete file.
 * Fix applied: "&params" had been HTML-entity-mangled to "&para;ms" on the
 * segment-pointer assignment below; restored.
 */
1269 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1270 * @params: information about the flow to be processed
1272 static enum ice_status
1273 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1275 u16 index, i, range_idx = 0;
1277 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1279 for (i = 0; i < params->prof->segs_cnt; i++) {
1280 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1281 u64 match = seg->match;
1284 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1285 struct ice_flow_fld_info *fld;
1286 const u64 bit = BIT_ULL(j);
1291 fld = &seg->fields[j];
1292 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1294 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1295 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1297 /* Range checking only supported for single
1300 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1302 BITS_PER_BYTE * 2) > 1)
1303 return ICE_ERR_PARAM;
1305 /* Ranges must define low and high values */
1306 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1307 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1308 return ICE_ERR_PARAM;
1310 fld->entry.val = range_idx++;
1312 /* Store adjusted byte-length of field for later
1313 * use, taking into account potential
1314 * non-byte-aligned displacement
1316 fld->entry.last = DIVIDE_AND_ROUND_UP
1317 (ice_flds_info[j].size +
1318 (fld->xtrct.disp % BITS_PER_BYTE),
1320 fld->entry.val = index;
1321 index += fld->entry.last;
1327 for (j = 0; j < seg->raws_cnt; j++) {
1328 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1330 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1331 raw->info.entry.val = index;
1332 raw->info.entry.last = raw->info.src.last;
1333 index += raw->info.entry.last;
1337 /* Currently only support using the byte selection base, which only
1338 * allows for an effective entry size of 30 bytes. Reject anything
1341 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1342 return ICE_ERR_PARAM;
1344 /* Only 8 range checkers per profile, reject anything trying to use
1347 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1348 return ICE_ERR_PARAM;
1350 /* Store # bytes required for entry for later use */
1351 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* NOTE(review): gapped listing — the switch's case labels and error checks
 * around these lines were dropped by extraction.
 */
1357 * ice_flow_proc_segs - process all packet segments associated with a profile
1358 * @hw: pointer to the HW struct
1359 * @params: information about the flow to be processed
1361 static enum ice_status
1362 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1364 enum ice_status status;
/* First resolve the protocol headers, then build the extraction sequence */
1366 status = ice_flow_proc_seg_hdrs(params);
1370 status = ice_flow_create_xtrct_seq(hw, params);
/* Block-specific post-processing: ACL additionally needs an entry layout
 * and a matching scenario; other visible branch just succeeds
 */
1374 switch (params->blk) {
1377 status = ICE_SUCCESS;
1380 status = ice_flow_acl_def_entry_frmt(params);
1383 status = ice_flow_sel_acl_scen(hw, params);
1388 return ICE_ERR_NOT_IMPL;
1394 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1395 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1396 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
/* NOTE(review): gapped listing — loop braces and "continue"/"break" lines
 * were dropped by extraction; verify against the complete file.
 */
1399 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1400 * @hw: pointer to the HW struct
1401 * @blk: classification stage
1402 * @dir: flow direction
1403 * @segs: array of one or more packet segments that describe the flow
1404 * @segs_cnt: number of packet segments provided
1405 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1406 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1408 static struct ice_flow_prof *
1409 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1410 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1411 u8 segs_cnt, u16 vsi_handle, u32 conds)
1413 struct ice_flow_prof *p, *prof = NULL;
/* Profile list is walked under the per-block lock */
1415 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1416 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1417 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1418 segs_cnt && segs_cnt == p->segs_cnt) {
1421 /* Check for profile-VSI association if specified */
1422 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1423 ice_is_vsi_valid(hw, vsi_handle) &&
1424 !ice_is_bit_set(p->vsis, vsi_handle))
1427 /* Protocol headers must be checked. Matched fields are
1428 * checked if specified.
1430 for (i = 0; i < segs_cnt; i++)
1431 if (segs[i].hdrs != p->segs[i].hdrs ||
1432 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1433 segs[i].match != p->segs[i].match))
1436 /* A match is found if all segments are matched */
1437 if (i == segs_cnt) {
1442 ice_release_lock(&hw->fl_profs_locks[blk]);
1448 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1449 * @hw: pointer to the HW struct
1450 * @blk: classification stage
1451 * @dir: flow direction
1452 * @segs: array of one or more packet segments that describe the flow
1453 * @segs_cnt: number of packet segments provided
1456 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1457 struct ice_flow_seg_info *segs, u8 segs_cnt)
1459 struct ice_flow_prof *p;
/* Thin wrapper: always checks matched fields, never a specific VSI */
1461 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1462 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
/* Return the profile's ID, or the invalid-ID sentinel when not found */
1464 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
/* NOTE(review): gapped listing — the "return p;"/"return NULL;" tail of this
 * lookup was dropped by extraction.
 */
1468 * ice_flow_find_prof_id - Look up a profile with given profile ID
1469 * @hw: pointer to the HW struct
1470 * @blk: classification stage
1471 * @prof_id: unique ID to identify this flow profile
1473 static struct ice_flow_prof *
1474 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1476 struct ice_flow_prof *p;
/* Linear scan of the block's profile list by ID; no locking visible here —
 * presumably the caller holds fl_profs_locks[blk] (TODO confirm)
 */
1478 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1479 if (p->id == prof_id)
/* NOTE(review): gapped listing — the NULL-guard lines implied by the embedded
 * numbering gaps (e.g. 1492-1496) were dropped by extraction.
 */
1486 * ice_dealloc_flow_entry - Deallocate flow entry memory
1487 * @hw: pointer to the HW struct
1488 * @entry: flow entry to be removed
1491 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
/* Free the entry's owned buffers before the entry itself */
1497 ice_free(hw, entry->entry);
1499 if (entry->range_buf) {
1500 ice_free(hw, entry->range_buf);
1501 entry->range_buf = NULL;
1505 ice_free(hw, entry->acts);
1507 entry->acts_cnt = 0;
1510 ice_free(hw, entry);
1513 #define ICE_ACL_INVALID_SCEN 0x3f
1516 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1517 * @hw: pointer to the hardware structure
1518 * @prof: pointer to flow profile
1519 * @buf: destination buffer function writes partial extraction sequence to
1521 * returns ICE_SUCCESS if no PF is associated to the given profile
1522 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1523 * returns other error code for real error
1525 static enum ice_status
1526 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1527 struct ice_aqc_acl_prof_generic_frmt *buf)
1529 enum ice_status status;
1532 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1536 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1540 /* If all PF's associated scenarios are all 0 or all
1541 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1542 * not been configured yet.
/* all-zero: fresh/unconfigured profile -> not in use */
1544 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1545 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1546 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1547 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
/* all-invalid: every PF has disassociated -> not in use */
1550 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1551 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1552 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1553 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1554 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1555 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1556 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1557 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* any other mix means at least one PF still references the profile */
1560 return ICE_ERR_IN_USE;
/* NOTE(review): gapped listing — the cntrs.first_cntr/last_cntr field names
 * on the two LE16_TO_CPU lines were dropped by extraction; confirm against
 * the complete file before editing.
 */
1564 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1565 * @hw: pointer to the hardware structure
1566 * @acts: array of actions to be performed on a match
1567 * @acts_cnt: number of actions
1569 static enum ice_status
1570 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1575 for (i = 0; i < acts_cnt; i++) {
/* Only counter-type actions own an ACL counter that must be returned */
1576 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1577 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1578 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1579 struct ice_acl_cntrs cntrs;
1580 enum ice_status status;
1582 cntrs.bank = 0; /* Only bank0 for the moment */
1584 LE16_TO_CPU(acts[i].data.acl_act.value);
1586 LE16_TO_CPU(acts[i].data.acl_act.value);
1588 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1589 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1591 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1593 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1602 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1603 * @hw: pointer to the hardware structure
1604 * @prof: pointer to flow profile
1606 * Disassociate the scenario from the profile for the PF of the VSI.
1608 static enum ice_status
1609 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1611 struct ice_aqc_acl_prof_generic_frmt buf;
1612 enum ice_status status = ICE_SUCCESS;
1615 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
/* Read-modify-write of the profile's generic format buffer */
1617 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1621 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1625 /* Clear scenario for this PF */
1626 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1627 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
/* NOTE(review): gapped listing — the guard conditions preceding the two
 * early returns (lines 1642/1648 of the original) were dropped by extraction.
 */
1633 * ice_flow_rem_entry_sync - Remove a flow entry
1634 * @hw: pointer to the HW struct
1635 * @blk: classification stage
1636 * @entry: flow entry to be removed
1638 static enum ice_status
1639 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1640 struct ice_flow_entry *entry)
1643 return ICE_ERR_BAD_PTR;
1645 if (blk == ICE_BLK_ACL) {
1646 enum ice_status status;
1649 return ICE_ERR_BAD_PTR;
/* ACL entries also live in a hardware scenario; remove that first */
1651 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1652 entry->scen_entry_idx);
1656 /* Checks if we need to release an ACL counter. */
1657 if (entry->acts_cnt && entry->acts)
1658 ice_flow_acl_free_act_cntr(hw, entry->acts,
/* Unlink from the profile's entry list, then free all owned memory */
1662 LIST_DEL(&entry->l_entry);
1664 ice_dealloc_flow_entry(hw, entry);
/* NOTE(review): gapped listing — embedded original line numbers jump, so some
 * statements (allocation checks, goto labels) are missing; verify against the
 * complete file.
 * Fix applied: four occurrences of "&params" had been HTML-entity-mangled to
 * "&para;ms" (ice_memset, ice_flow_proc_segs, INIT_LIST_HEAD, ice_init_lock);
 * restored.
 */
1670 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1671 * @hw: pointer to the HW struct
1672 * @blk: classification stage
1673 * @dir: flow direction
1674 * @prof_id: unique ID to identify this flow profile
1675 * @segs: array of one or more packet segments that describe the flow
1676 * @segs_cnt: number of packet segments provided
1677 * @acts: array of default actions
1678 * @acts_cnt: number of default actions
1679 * @prof: stores the returned flow profile added
1681 * Assumption: the caller has acquired the lock to the profile list
1683 static enum ice_status
1684 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1685 enum ice_flow_dir dir, u64 prof_id,
1686 struct ice_flow_seg_info *segs, u8 segs_cnt,
1687 struct ice_flow_action *acts, u8 acts_cnt,
1688 struct ice_flow_prof **prof)
1690 struct ice_flow_prof_params params;
1691 enum ice_status status;
1694 if (!prof || (acts_cnt && !acts))
1695 return ICE_ERR_BAD_PTR;
1697 ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1698 params.prof = (struct ice_flow_prof *)
1699 ice_malloc(hw, sizeof(*params.prof));
1701 return ICE_ERR_NO_MEMORY;
1703 /* initialize extraction sequence to all invalid (0xff) */
1704 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1705 params.es[i].prot_id = ICE_PROT_INVALID;
1706 params.es[i].off = ICE_FV_OFFSET_INVAL;
1710 params.prof->id = prof_id;
1711 params.prof->dir = dir;
1712 params.prof->segs_cnt = segs_cnt;
1714 /* Make a copy of the segments that need to be persistent in the flow
1717 for (i = 0; i < segs_cnt; i++)
1718 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1719 ICE_NONDMA_TO_NONDMA);
1721 /* Make a copy of the actions that need to be persistent in the flow
1725 params.prof->acts = (struct ice_flow_action *)
1726 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1727 ICE_NONDMA_TO_NONDMA);
1729 if (!params.prof->acts) {
1730 status = ICE_ERR_NO_MEMORY;
1735 status = ice_flow_proc_segs(hw, &params);
1737 ice_debug(hw, ICE_DBG_FLOW,
1738 "Error processing a flow's packet segments\n");
1742 /* Add a HW profile for this flow profile */
1743 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1744 params.attr, params.attr_cnt, params.es,
1747 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1751 INIT_LIST_HEAD(&params.prof->entries);
1752 ice_init_lock(&params.prof->entries_lock);
1753 *prof = params.prof;
/* error cleanup: release any partially-built profile */
1757 if (params.prof->acts)
1758 ice_free(hw, params.prof->acts);
1759 ice_free(hw, params.prof);
/* NOTE(review): gapped listing — error-check lines and braces were dropped
 * by extraction; verify against the complete file.
 */
1766 * ice_flow_rem_prof_sync - remove a flow profile
1767 * @hw: pointer to the hardware structure
1768 * @blk: classification stage
1769 * @prof: pointer to flow profile to remove
1771 * Assumption: the caller has acquired the lock to the profile list
1773 static enum ice_status
1774 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1775 struct ice_flow_prof *prof)
1777 enum ice_status status;
1779 /* Remove all remaining flow entries before removing the flow profile */
1780 if (!LIST_EMPTY(&prof->entries)) {
1781 struct ice_flow_entry *e, *t;
1783 ice_acquire_lock(&prof->entries_lock);
/* _SAFE variant: entries are deleted while iterating */
1785 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1787 status = ice_flow_rem_entry_sync(hw, blk, e);
1792 ice_release_lock(&prof->entries_lock);
1795 if (blk == ICE_BLK_ACL) {
1796 struct ice_aqc_acl_profile_ranges query_rng_buf;
1797 struct ice_aqc_acl_prof_generic_frmt buf;
1800 /* Disassociate the scenario from the profile for the PF */
1801 status = ice_flow_acl_disassoc_scen(hw, prof);
1805 /* Clear the range-checker if the profile ID is no longer
1808 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1809 if (status && status != ICE_ERR_IN_USE) {
1811 } else if (!status) {
1812 /* Clear the range-checker value for profile ID */
1813 ice_memset(&query_rng_buf, 0,
1814 sizeof(struct ice_aqc_acl_profile_ranges),
1817 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1822 status = ice_prog_acl_prof_ranges(hw, prof_id,
1823 &query_rng_buf, NULL);
1829 /* Remove all hardware profiles associated with this flow profile */
1830 status = ice_rem_prof(hw, blk, prof->id);
1832 LIST_DEL(&prof->l_entry);
1833 ice_destroy_lock(&prof->entries_lock);
1835 ice_free(hw, prof->acts);
1843 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1844 * @buf: Destination buffer function writes partial xtrct sequence to
1845 * @info: Info about field
1848 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1849 struct ice_flow_fld_info *info)
/* Source byte index into the field vector: FV-word index scaled to bytes
 * plus the in-word byte displacement
 */
1854 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1855 info->xtrct.disp / BITS_PER_BYTE;
1856 dst = info->entry.val;
1857 for (i = 0; i < info->entry.last; i++)
1858 /* HW stores field vector words in LE, convert words back to BE
1859 * so constructed entries will end up in network order
/* XOR with 1 swaps the byte pair within each 2-byte FV word */
1861 buf->byte_selection[dst++] = src++ ^ 1;
/* NOTE(review): gapped listing — the in-use early-return and some braces
 * around the per-field loop were dropped by extraction.
 */
1865 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1866 * @hw: pointer to the hardware structure
1867 * @prof: pointer to flow profile
1869 static enum ice_status
1870 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1872 struct ice_aqc_acl_prof_generic_frmt buf;
1873 struct ice_flow_fld_info *info;
1874 enum ice_status status;
1878 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1880 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1884 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1885 if (status && status != ICE_ERR_IN_USE)
1889 /* Program the profile dependent configuration. This is done
1890 * only once regardless of the number of PFs using that profile
1892 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1894 for (i = 0; i < prof->segs_cnt; i++) {
1895 struct ice_flow_seg_info *seg = &prof->segs[i];
1896 u64 match = seg->match;
1899 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1900 const u64 bit = BIT_ULL(j);
1905 info = &seg->fields[j];
/* range-checked fields use the word selection; others use byte selection */
1907 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1908 buf.word_selection[info->entry.val] =
1911 ice_flow_acl_set_xtrct_seq_fld(&buf,
1917 for (j = 0; j < seg->raws_cnt; j++) {
1918 info = &seg->raws[j].info;
1919 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Default every PF slot to "invalid scenario" ... */
1923 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1924 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1928 /* Update the current PF */
1929 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1930 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1936 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1937 * @hw: pointer to the hardware structure
1938 * @blk: classification stage
1939 * @vsi_handle: software VSI handle
1940 * @vsig: target VSI group
1942 * Assumption: the caller has already verified that the VSI to
1943 * be added has the same characteristics as the VSIG and will
1944 * thereby have access to all resources added to that VSIG.
1947 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1950 enum ice_status status;
1952 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1953 return ICE_ERR_PARAM;
/* Translate the software handle to the HW VSI number under the block lock */
1955 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1956 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1958 ice_release_lock(&hw->fl_profs_locks[blk]);
/* NOTE(review): gapped listing — error-path braces/returns were dropped by
 * extraction.
 */
1964 * ice_flow_assoc_prof - associate a VSI with a flow profile
1965 * @hw: pointer to the hardware structure
1966 * @blk: classification stage
1967 * @prof: pointer to flow profile
1968 * @vsi_handle: software VSI handle
1970 * Assumption: the caller has acquired the lock to the profile list
1971 * and the software VSI handle has been validated
1973 static enum ice_status
1974 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1975 struct ice_flow_prof *prof, u16 vsi_handle)
1977 enum ice_status status = ICE_SUCCESS;
/* no-op if this VSI is already associated with the profile */
1979 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1980 if (blk == ICE_BLK_ACL) {
1981 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1985 status = ice_add_prof_id_flow(hw, blk,
1986 ice_get_hw_vsi_num(hw,
/* record the association in the profile's VSI bitmap on success */
1990 ice_set_bit(vsi_handle, prof->vsis);
1992 ice_debug(hw, ICE_DBG_FLOW,
1993 "HW profile add failed, %d\n",
2001 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2002 * @hw: pointer to the hardware structure
2003 * @blk: classification stage
2004 * @prof: pointer to flow profile
2005 * @vsi_handle: software VSI handle
2007 * Assumption: the caller has acquired the lock to the profile list
2008 * and the software VSI handle has been validated
2010 static enum ice_status
2011 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2012 struct ice_flow_prof *prof, u16 vsi_handle)
2014 enum ice_status status = ICE_SUCCESS;
/* Nothing to do unless the VSI is currently associated */
2016 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2017 status = ice_rem_prof_id_flow(hw, blk,
2018 ice_get_hw_vsi_num(hw,
/* Clear the SW association only after the HW removal succeeds */
2022 ice_clear_bit(vsi_handle, prof->vsis);
2024 ice_debug(hw, ICE_DBG_FLOW,
2025 "HW profile remove failed, %d\n",
2033 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2034 * @hw: pointer to the HW struct
2035 * @blk: classification stage
2036 * @dir: flow direction
2037 * @prof_id: unique ID to identify this flow profile
2038 * @segs: array of one or more packet segments that describe the flow
2039 * @segs_cnt: number of packet segments provided
2040 * @acts: array of default actions
2041 * @acts_cnt: number of default actions
2042 * @prof: stores the returned flow profile added
2045 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2046 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2047 struct ice_flow_action *acts, u8 acts_cnt,
2048 struct ice_flow_prof **prof)
2050 enum ice_status status;
/* Bound the segment count before any further validation */
2052 if (segs_cnt > ICE_FLOW_SEG_MAX)
2053 return ICE_ERR_MAX_LIMIT;
2056 return ICE_ERR_PARAM;
2059 return ICE_ERR_BAD_PTR;
/* Verify the requested segment headers before taking the lock */
2061 status = ice_flow_val_hdrs(segs, segs_cnt);
2065 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2067 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2068 acts, acts_cnt, prof);
/* On success, publish the new profile on the per-block profile list */
2070 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2072 ice_release_lock(&hw->fl_profs_locks[blk]);
2078 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2079 * @hw: pointer to the HW struct
2080 * @blk: the block for which the flow profile is to be removed
2081 * @prof_id: unique ID of the flow profile to be removed
2084 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2086 struct ice_flow_prof *prof;
2087 enum ice_status status;
2089 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2091 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2093 status = ICE_ERR_DOES_NOT_EXIST;
2097 /* prof becomes invalid after the call */
2098 status = ice_flow_rem_prof_sync(hw, blk, prof);
2101 ice_release_lock(&hw->fl_profs_locks[blk]);
2107 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2108 * @hw: pointer to the HW struct
2109 * @blk: classification stage
2110 * @prof_id: the profile ID handle
2111 * @hw_prof_id: pointer to variable to receive the HW profile ID
2114 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2117 struct ice_prof_map *map;
/* Look up the SW-to-HW profile mapping for this block */
2119 map = ice_search_prof_id(hw, blk, prof_id);
2121 *hw_prof_id = map->prof_id;
2125 return ICE_ERR_DOES_NOT_EXIST;
2129 * ice_flow_find_entry - look for a flow entry using its unique ID
2130 * @hw: pointer to the HW struct
2131 * @blk: classification stage
2132 * @entry_id: unique ID to identify this flow entry
2134 * This function looks for the flow entry with the specified unique ID in all
2135 * flow profiles of the specified classification stage. If the entry is found,
2136 * it returns the handle to the flow entry. Otherwise, it returns
2137 * ICE_FLOW_ENTRY_HANDLE_INVAL.
2139 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2141 struct ice_flow_entry *found = NULL;
2142 struct ice_flow_prof *p;
2144 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2146 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2147 struct ice_flow_entry *e;
/* Each profile's entry list has its own lock, nested inside the
 * per-block profile-list lock
 */
2149 ice_acquire_lock(&p->entries_lock);
2150 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2151 if (e->id == entry_id) {
2155 ice_release_lock(&p->entries_lock);
2161 ice_release_lock(&hw->fl_profs_locks[blk]);
2163 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2167 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2168 * @hw: pointer to the hardware structure
2169 * @acts: array of actions to be performed on a match
2170 * @acts_cnt: number of actions
2171 * @cnt_alloc: indicates if an ACL counter has been allocated.
2173 static enum ice_status
2174 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2175 u8 acts_cnt, bool *cnt_alloc)
2177 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2180 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2183 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2184 return ICE_ERR_OUT_OF_RANGE;
/* First pass: validate each action type and reject duplicates */
2186 for (i = 0; i < acts_cnt; i++) {
2187 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2188 acts[i].type != ICE_FLOW_ACT_DROP &&
2189 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2190 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2193 /* If the caller wants to add two actions of the same type, then
2194 * it is considered invalid configuration.
2196 if (ice_test_and_set_bit(acts[i].type, dup_check))
2197 return ICE_ERR_PARAM;
2200 /* Checks if ACL counters are needed. */
2201 for (i = 0; i < acts_cnt; i++) {
2202 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2203 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2204 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2205 struct ice_acl_cntrs cntrs;
2206 enum ice_status status;
2209 cntrs.bank = 0; /* Only bank0 for the moment */
/* Dual counters track both packets and bytes */
2211 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2212 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2214 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2216 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2219 /* Counter index within the bank */
2220 acts[i].data.acl_act.value =
2221 CPU_TO_LE16(cntrs.first_cntr);
2230 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2231 * @fld: number of the given field
2232 * @info: info about field
2233 * @range_buf: range checker configuration buffer
2234 * @data: pointer to a data buffer containing flow entry's match values/masks
2235 * @range: Input/output param indicating which range checkers are being used
2238 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2239 struct ice_aqc_acl_profile_ranges *range_buf,
2240 u8 *data, u8 *range)
2244 /* If not specified, default mask is all bits in field */
2245 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2246 BIT(ice_flds_info[fld].size) - 1 :
2247 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2249 /* If the mask is 0, then we don't need to worry about this input
2250 * range checker value.
2254 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2256 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2257 u8 range_idx = info->entry.val;
/* Range checker boundaries and mask are programmed big-endian */
2259 range_buf->checker_cfg[range_idx].low_boundary =
2260 CPU_TO_BE16(new_low);
2261 range_buf->checker_cfg[range_idx].high_boundary =
2262 CPU_TO_BE16(new_high);
2263 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2265 /* Indicate which range checker is being used */
2266 *range |= BIT(range_idx);
2271 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2272 * @fld: number of the given field
2273 * @info: info about the field
2274 * @buf: buffer containing the entry
2275 * @dontcare: buffer containing don't care mask for entry
2276 * @data: pointer to a data buffer containing flow entry's match values/masks
2279 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2280 u8 *dontcare, u8 *data)
2282 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2283 bool use_mask = false;
2286 src = info->src.val;
2287 mask = info->src.mask;
2288 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* disp is the field's bit offset within its first byte */
2289 disp = info->xtrct.disp % BITS_PER_BYTE;
2291 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying shifted-out bits forward */
2294 for (k = 0; k < info->entry.last; k++, dst++) {
2295 /* Add overflow bits from previous byte */
2296 buf[dst] = (tmp_s & 0xff00) >> 8;
2298 /* If mask is not valid, tmp_m is always zero, so just setting
2299 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2300 * overflow bits of mask from prev byte
2302 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2304 /* If there is displacement, last byte will only contain
2305 * displaced data, but there is no more data to read from user
2306 * buffer, so skip so as not to potentially read beyond end of
2309 if (!disp || k < info->entry.last - 1) {
2310 /* Store shifted data to use in next byte */
2311 tmp_s = data[src++] << disp;
2313 /* Add current (shifted) byte */
2314 buf[dst] |= tmp_s & 0xff;
2316 /* Handle mask if valid */
2318 tmp_m = (~data[mask++] & 0xff) << disp;
2319 dontcare[dst] |= tmp_m & 0xff;
2324 /* Fill in don't care bits at beginning of field */
2326 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2327 for (k = 0; k < disp; k++)
2328 dontcare[dst] |= BIT(k);
/* end_disp is the bit position just past the field in its final byte */
2331 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2333 /* Fill in don't care bits at end of field */
2335 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2336 info->entry.last - 1;
2337 for (k = end_disp; k < BITS_PER_BYTE; k++)
2338 dontcare[dst] |= BIT(k);
2343 * ice_flow_acl_frmt_entry - Format ACL entry
2344 * @hw: pointer to the hardware structure
2345 * @prof: pointer to flow profile
2346 * @e: pointer to the flow entry
2347 * @data: pointer to a data buffer containing flow entry's match values/masks
2348 * @acts: array of actions to be performed on a match
2349 * @acts_cnt: number of actions
2351 * Formats the key (and key_inverse) to be matched from the data passed in,
2352 * along with data from the flow profile. This key/key_inverse pair makes up
2353 * the 'entry' for an ACL flow entry.
2355 static enum ice_status
2356 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2357 struct ice_flow_entry *e, u8 *data,
2358 struct ice_flow_action *acts, u8 acts_cnt)
2360 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2361 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2362 enum ice_status status;
2367 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2371 /* Format the result action */
2373 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2377 status = ICE_ERR_NO_MEMORY;
/* Keep a private copy of the actions on the entry */
2379 e->acts = (struct ice_flow_action *)
2380 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2381 ICE_NONDMA_TO_NONDMA);
2386 e->acts_cnt = acts_cnt;
2388 /* Format the matching data */
2389 buf_sz = prof->cfg.scen->width;
2390 buf = (u8 *)ice_malloc(hw, buf_sz);
2394 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2398 /* 'key' buffer will store both key and key_inverse, so must be twice
2401 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2405 range_buf = (struct ice_aqc_acl_profile_ranges *)
2406 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2410 /* Set don't care mask to all 1's to start, will zero out used bytes */
2411 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Walk every matched field of every segment and format it into the
 * key buffer, either as a range checker or as a regular field
 */
2413 for (i = 0; i < prof->segs_cnt; i++) {
2414 struct ice_flow_seg_info *seg = &prof->segs[i];
2415 u64 match = seg->match;
2418 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2419 struct ice_flow_fld_info *info;
2420 const u64 bit = BIT_ULL(j);
2425 info = &seg->fields[j];
2427 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2428 ice_flow_acl_frmt_entry_range(j, info,
2432 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) fields are copied without bit shifting */
2438 for (j = 0; j < seg->raws_cnt; j++) {
2439 struct ice_flow_fld_info *info = &seg->raws[j].info;
2440 u16 dst, src, mask, k;
2441 bool use_mask = false;
2443 src = info->src.val;
2444 dst = info->entry.val -
2445 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2446 mask = info->src.mask;
2448 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2451 for (k = 0; k < info->entry.last; k++, dst++) {
2452 buf[dst] = data[src++];
2454 dontcare[dst] = ~data[mask++];
/* Stamp the HW profile ID into its dedicated byte of the key */
2461 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2462 dontcare[prof->cfg.scen->pid_idx] = 0;
2464 /* Format the buffer for direction flags */
2465 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2467 if (prof->dir == ICE_FLOW_RX)
2468 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2471 buf[prof->cfg.scen->rng_chk_idx] = range;
2472 /* Mark any unused range checkers as don't care */
2473 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
/* Ownership of range_buf transfers to the entry here */
2474 e->range_buf = range_buf;
2476 ice_free(hw, range_buf);
2479 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2485 e->entry_sz = buf_sz * 2;
2492 ice_free(hw, dontcare);
/* On failure, unwind ownership handed to the entry above */
2497 if (status && range_buf) {
2498 ice_free(hw, range_buf);
2499 e->range_buf = NULL;
2502 if (status && e->acts) {
2503 ice_free(hw, e->acts);
2508 if (status && cnt_alloc)
2509 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2515 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2516 * the compared data.
2517 * @prof: pointer to flow profile
2518 * @e: pointer to the comparing flow entry
2519 * @do_chg_action: decide if we want to change the ACL action
2520 * @do_add_entry: decide if we want to add the new ACL entry
2521 * @do_rem_entry: decide if we want to remove the current ACL entry
2523 * Find an ACL scenario entry that matches the compared data. In the same time,
2524 * this function also figures out:
2525 * a/ If we want to change the ACL action
2526 * b/ If we want to add the new ACL entry
2527 * c/ If we want to remove the current ACL entry
2529 static struct ice_flow_entry *
2530 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2531 struct ice_flow_entry *e, bool *do_chg_action,
2532 bool *do_add_entry, bool *do_rem_entry)
2534 struct ice_flow_entry *p, *return_entry = NULL;
2538 * a/ There exists an entry with same matching data, but different
2539 * priority, then we remove this existing ACL entry. Then, we
2540 * will add the new entry to the ACL scenario.
2541 * b/ There exists an entry with same matching data, priority, and
2542 * result action, then we do nothing
2543 * c/ There exists an entry with same matching data, priority, but
2544 * different action, then only change the entry's action.
2545 * d/ Else, we add this new entry to the ACL scenario.
/* Default decision: brand-new entry, nothing to change or remove */
2547 *do_chg_action = false;
2548 *do_add_entry = true;
2549 *do_rem_entry = false;
2550 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2551 if (memcmp(p->entry, e->entry, p->entry_sz))
2554 /* From this point, we have the same matching_data. */
2555 *do_add_entry = false;
2558 if (p->priority != e->priority) {
2559 /* matching data && !priority */
2560 *do_add_entry = true;
2561 *do_rem_entry = true;
2565 /* From this point, we will have matching_data && priority */
2566 if (p->acts_cnt != e->acts_cnt)
2567 *do_chg_action = true;
/* Compare action lists pairwise; any unmatched action on either
 * side forces an action update
 */
2568 for (i = 0; i < p->acts_cnt; i++) {
2569 bool found_not_match = false;
2571 for (j = 0; j < e->acts_cnt; j++)
2572 if (memcmp(&p->acts[i], &e->acts[j],
2573 sizeof(struct ice_flow_action))) {
2574 found_not_match = true;
2578 if (found_not_match) {
2579 *do_chg_action = true;
2584 /* (do_chg_action = true) means :
2585 * matching_data && priority && !result_action
2586 * (do_chg_action = false) means :
2587 * matching_data && priority && result_action
2592 return return_entry;
2596 * ice_flow_acl_convert_to_acl_prior - Convert flow priority to ACL priority
/* Maps the generic flow priority enum onto the ACL entry priority enum;
 * unknown values default to normal priority
 */
2599 static enum ice_acl_entry_prior
2600 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2602 enum ice_acl_entry_prior acl_prior;
2605 case ICE_FLOW_PRIO_LOW:
2606 acl_prior = ICE_LOW;
2608 case ICE_FLOW_PRIO_NORMAL:
2609 acl_prior = ICE_NORMAL;
2611 case ICE_FLOW_PRIO_HIGH:
2612 acl_prior = ICE_HIGH;
2615 acl_prior = ICE_NORMAL;
2623 * ice_flow_acl_union_rng_chk - Perform union operation between two
2624 * range-range checker buffers
2625 * @dst_buf: pointer to destination range checker buffer
2626 * @src_buf: pointer to source range checker buffer
2628 * For this function, we do the union between dst_buf and src_buf
2629 * range checker buffer, and we will save the result back to dst_buf
2631 static enum ice_status
2632 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2633 struct ice_aqc_acl_profile_ranges *src_buf)
2637 if (!dst_buf || !src_buf)
2638 return ICE_ERR_BAD_PTR;
2640 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2641 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2642 bool will_populate = false;
2644 in_data = &src_buf->checker_cfg[i];
/* Find a free destination slot (mask == 0) or an identical
 * already-programmed configuration
 */
2649 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2650 cfg_data = &dst_buf->checker_cfg[j];
2652 if (!cfg_data->mask ||
2653 !memcmp(cfg_data, in_data,
2654 sizeof(struct ice_acl_rng_data))) {
2655 will_populate = true;
2660 if (will_populate) {
2661 ice_memcpy(cfg_data, in_data,
2662 sizeof(struct ice_acl_rng_data),
2663 ICE_NONDMA_TO_NONDMA);
2665 /* No available slot left to program range checker */
2666 return ICE_ERR_MAX_LIMIT;
2674 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2675 * @hw: pointer to the hardware structure
2676 * @prof: pointer to flow profile
2677 * @entry: double pointer to the flow entry
2679 * For this function, we will look at the current added entries in the
2680 * corresponding ACL scenario. Then, we will perform matching logic to
2681 * see if we want to add/modify/do nothing with this new entry.
2683 static enum ice_status
2684 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2685 struct ice_flow_entry **entry)
2687 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2688 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2689 struct ice_acl_act_entry *acts = NULL;
2690 struct ice_flow_entry *exist;
2691 enum ice_status status = ICE_SUCCESS;
2692 struct ice_flow_entry *e;
2695 if (!entry || !(*entry) || !prof)
2696 return ICE_ERR_BAD_PTR;
2700 do_chg_rng_chk = false;
2704 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2709 /* Query the current range-checker value in FW */
2710 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2714 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2715 sizeof(struct ice_aqc_acl_profile_ranges),
2716 ICE_NONDMA_TO_NONDMA);
2718 /* Generate the new range-checker value */
2719 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2723 /* Reconfigure the range check if the buffer is changed. */
2724 do_chg_rng_chk = false;
2725 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2726 sizeof(struct ice_aqc_acl_profile_ranges))) {
2727 status = ice_prog_acl_prof_ranges(hw, prof_id,
2728 &cfg_rng_buf, NULL);
2732 do_chg_rng_chk = true;
2736 /* Figure out if we want to (change the ACL action) and/or
2737 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2739 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2740 &do_add_entry, &do_rem_entry);
2743 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2748 /* Prepare the result action buffer */
/* NOTE(review): allocation is sized by e->entry_sz but only
 * e->acts_cnt elements are populated below — looks like an
 * over-allocation; verify the intended element count.
 */
2749 acts = (struct ice_acl_act_entry *)ice_calloc
2750 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2751 for (i = 0; i < e->acts_cnt; i++)
2752 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2753 sizeof(struct ice_acl_act_entry),
2754 ICE_NONDMA_TO_NONDMA);
2757 enum ice_acl_entry_prior prior;
/* The entry buffer holds the key followed by its inverse */
2761 keys = (u8 *)e->entry;
2762 inverts = keys + (e->entry_sz / 2);
2763 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2765 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2766 inverts, acts, e->acts_cnt,
2771 e->scen_entry_idx = entry_idx;
2772 LIST_ADD(&e->l_entry, &prof->entries);
2774 if (do_chg_action) {
2775 /* For the action memory info, update the SW's copy of
2776 * exist entry with e's action memory info
2778 ice_free(hw, exist->acts);
2779 exist->acts_cnt = e->acts_cnt;
2780 exist->acts = (struct ice_flow_action *)
2781 ice_calloc(hw, exist->acts_cnt,
2782 sizeof(struct ice_flow_action));
2785 status = ICE_ERR_NO_MEMORY;
2789 ice_memcpy(exist->acts, e->acts,
2790 sizeof(struct ice_flow_action) * e->acts_cnt,
2791 ICE_NONDMA_TO_NONDMA);
2793 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2795 exist->scen_entry_idx);
2800 if (do_chg_rng_chk) {
2801 /* In this case, we want to update the range checker
2802 * information of the exist entry
2804 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2810 /* As we don't add the new entry to our SW DB, deallocate its
2811 * memories, and return the exist entry to the caller
2813 ice_dealloc_flow_entry(hw, e);
2824 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2825 * @hw: pointer to the hardware structure
2826 * @prof: pointer to flow profile
2827 * @e: double pointer to the flow entry
/* Thin locking wrapper around ice_flow_acl_add_scen_entry_sync() */
2829 static enum ice_status
2830 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2831 struct ice_flow_entry **e)
2833 enum ice_status status;
2835 ice_acquire_lock(&prof->entries_lock);
2836 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2837 ice_release_lock(&prof->entries_lock);
2843 * ice_flow_add_entry - Add a flow entry
2844 * @hw: pointer to the HW struct
2845 * @blk: classification stage
2846 * @prof_id: ID of the profile to add a new flow entry to
2847 * @entry_id: unique ID to identify this flow entry
2848 * @vsi_handle: software VSI handle for the flow entry
2849 * @prio: priority of the flow entry
2850 * @data: pointer to a data buffer containing flow entry's match values/masks
2851 * @acts: arrays of actions to be performed on a match
2852 * @acts_cnt: number of actions
2853 * @entry_h: pointer to buffer that receives the new flow entry's handle
2856 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2857 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2858 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2861 struct ice_flow_entry *e = NULL;
2862 struct ice_flow_prof *prof;
2863 enum ice_status status = ICE_SUCCESS;
2865 /* ACL entries must indicate an action */
2866 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2867 return ICE_ERR_PARAM;
2869 /* No flow entry data is expected for RSS */
2870 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2871 return ICE_ERR_BAD_PTR;
2873 if (!ice_is_vsi_valid(hw, vsi_handle))
2874 return ICE_ERR_PARAM;
2876 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2878 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2880 status = ICE_ERR_DOES_NOT_EXIST;
2882 /* Allocate memory for the entry being added and associate
2883 * the VSI to the found flow profile
2885 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2887 status = ICE_ERR_NO_MEMORY;
2889 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2892 ice_release_lock(&hw->fl_profs_locks[blk]);
2897 e->vsi_handle = vsi_handle;
2906 /* ACL will handle the entry management */
2907 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2912 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2918 status = ICE_ERR_NOT_IMPL;
2922 if (blk != ICE_BLK_ACL) {
2923 /* ACL will handle the entry management */
2924 ice_acquire_lock(&prof->entries_lock);
2925 LIST_ADD(&e->l_entry, &prof->entries);
2926 ice_release_lock(&prof->entries_lock);
/* Hand the opaque entry handle back to the caller */
2929 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2934 ice_free(hw, e->entry);
2942 * ice_flow_rem_entry - Remove a flow entry
2943 * @hw: pointer to the HW struct
2944 * @blk: classification stage
2945 * @entry_h: handle to the flow entry to be removed
2947 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2950 struct ice_flow_entry *entry;
2951 struct ice_flow_prof *prof;
2952 enum ice_status status = ICE_SUCCESS;
2954 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2955 return ICE_ERR_PARAM;
/* Convert the opaque handle back to an entry pointer */
2957 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2959 /* Retain the pointer to the flow profile as the entry will be freed */
2963 ice_acquire_lock(&prof->entries_lock);
2964 status = ice_flow_rem_entry_sync(hw, blk, entry);
2965 ice_release_lock(&prof->entries_lock);
2972 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2973 * @seg: packet segment the field being set belongs to
2974 * @fld: field to be set
2975 * @field_type: type of the field
2976 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2977 * entry's input buffer
2978 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2980 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2981 * entry's input buffer
2983 * This helper function stores information of a field being matched, including
2984 * the type of the field and the locations of the value to match, the mask, and
2985 * the upper-bound value in the start of the input buffer for a flow entry.
2986 * This function should only be used for fixed-size data structures.
2988 * This function also opportunistically determines the protocol headers to be
2989 * present based on the fields being set. Some fields cannot be used alone to
2990 * determine the protocol headers present. Sometimes, fields for particular
2991 * protocol headers are not matched. In those cases, the protocol headers
2992 * must be explicitly set.
2995 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2996 enum ice_flow_fld_match_type field_type, u16 val_loc,
2997 u16 mask_loc, u16 last_loc)
2999 u64 bit = BIT_ULL(fld);
3002 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record where to pull value/mask/last from the entry input buffer */
3005 seg->fields[fld].type = field_type;
3006 seg->fields[fld].src.val = val_loc;
3007 seg->fields[fld].src.mask = mask_loc;
3008 seg->fields[fld].src.last = last_loc;
/* Infer the protocol header this field implies from the field table */
3010 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3014 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3015 * @seg: packet segment the field being set belongs to
3016 * @fld: field to be set
3017 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3018 * entry's input buffer
3019 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3021 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3022 * entry's input buffer
3023 * @range: indicate if field being matched is to be in a range
3025 * This function specifies the locations, in the form of byte offsets from the
3026 * start of the input buffer for a flow entry, from where the value to match,
3027 * the mask value, and upper value can be extracted. These locations are then
3028 * stored in the flow profile. When adding a flow entry associated with the
3029 * flow profile, these locations will be used to quickly extract the values and
3030 * create the content of a match entry. This function should only be used for
3031 * fixed-size data structures.
3034 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3035 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Select the match type then delegate to the common helper */
3037 enum ice_flow_fld_match_type t = range ?
3038 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3040 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3044 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3045 * @seg: packet segment the field being set belongs to
3046 * @fld: field to be set
3047 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3048 * entry's input buffer
3049 * @pref_loc: location of prefix value from entry's input buffer
3050 * @pref_sz: size of the location holding the prefix value
3052 * This function specifies the locations, in the form of byte offsets from the
3053 * start of the input buffer for a flow entry, from where the value to match
3054 * and the IPv4 prefix value can be extracted. These locations are then stored
3055 * in the flow profile. When adding flow entries to the associated flow profile,
3056 * these locations can be used to quickly extract the values to create the
3057 * content of a match entry. This function should only be used for fixed-size
3061 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3062 u16 val_loc, u16 pref_loc, u8 pref_sz)
3064 /* For this type of field, the "mask" location is for the prefix value's
3065 * location and the "last" location is for the size of the location of
3068 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3069 pref_loc, (u16)pref_sz);
3073 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3074 * @seg: packet segment the field being set belongs to
3075 * @off: offset of the raw field from the beginning of the segment in bytes
3076 * @len: length of the raw pattern to be matched
3077 * @val_loc: location of the value to match from entry's input buffer
3078 * @mask_loc: location of mask value from entry's input buffer
3080 * This function specifies the offset of the raw field to be match from the
3081 * beginning of the specified packet segment, and the locations, in the form of
3082 * byte offsets from the start of the input buffer for a flow entry, from where
3083 * the value to match and the mask value to be extracted. These locations are
3084 * then stored in the flow profile. When adding flow entries to the associated
3085 * flow profile, these locations can be used to quickly extract the values to
3086 * create the content of a match entry. This function should only be used for
3087 * fixed-size data structures.
3090 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3091 u16 val_loc, u16 mask_loc)
/* Only record the raw field if there is still room in the segment */
3093 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3094 seg->raws[seg->raws_cnt].off = off;
3095 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3096 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3097 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3098 /* The "last" field is used to store the length of the field */
3099 seg->raws[seg->raws_cnt].info.src.last = len;
3102 /* Overflows of "raws" will be handled as an error condition later in
3103 * the flow when this information is processed.
/* Protocol header bitmasks permitted in an RSS flow segment, grouped by
 * network layer; VAL_MASKS is the union of all valid RSS header bits
 */
3108 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3109 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3111 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3112 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3114 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3115 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3116 ICE_FLOW_SEG_HDR_SCTP)
3118 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3119 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3120 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3121 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3124 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3125 * @segs: pointer to the flow field segment(s)
3126 * @hash_fields: fields to be hashed on for the segment(s)
3127 * @flow_hdr: protocol header fields within a packet segment
3129 * Helper function to extract fields from hash bitmap and use flow
3130 * header value to set flow field segment for further use in flow
3131 * profile entry or removal.
3133 static enum ice_status
3134 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3137 u64 val = hash_fields;
/* Register each hashed field; no value/mask/last offsets are needed
 * for RSS, so all locations are marked invalid
 */
3140 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3141 u64 bit = BIT_ULL(i);
3144 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3145 ICE_FLOW_FLD_OFF_INVAL,
3146 ICE_FLOW_FLD_OFF_INVAL,
3147 ICE_FLOW_FLD_OFF_INVAL, false);
3151 ICE_FLOW_SET_HDRS(segs, flow_hdr);
3153 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3154 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3155 return ICE_ERR_PARAM;
/* At most one L3 and one L4 header type may be selected (power-of-two
 * check on the masked bits)
 */
3157 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3158 if (val && !ice_is_pow2(val))
3161 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3162 if (val && !ice_is_pow2(val))
3169 * ice_rem_vsi_rss_list - remove VSI from RSS list
3170 * @hw: pointer to the hardware structure
3171 * @vsi_handle: software VSI handle
3173 * Remove the VSI from all RSS configurations in the list.
3175 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3177 struct ice_rss_cfg *r, *tmp;
3179 if (LIST_EMPTY(&hw->rss_list_head))
3182 ice_acquire_lock(&hw->rss_locks);
3183 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3184 ice_rss_cfg, l_entry)
/* Drop a configuration entirely once no VSI references it */
3185 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3186 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3187 LIST_DEL(&r->l_entry);
3190 ice_release_lock(&hw->rss_locks);
3194 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3195 * @hw: pointer to the hardware structure
3196 * @vsi_handle: software VSI handle
3198 * This function will iterate through all flow profiles and disassociate
3199 * the VSI from that profile. If the flow profile has no VSIs it will
3202 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3204 const enum ice_block blk = ICE_BLK_RSS;
3205 struct ice_flow_prof *p, *t;
3206 enum ice_status status = ICE_SUCCESS;
/* Validate the software VSI handle before touching any state. */
3208 if (!ice_is_vsi_valid(hw, vsi_handle))
3209 return ICE_ERR_PARAM;
/* Nothing to do when no flow profiles exist for the RSS block. */
3211 if (LIST_EMPTY(&hw->fl_profs[blk]))
3214 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3215 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
/* Only profiles currently associated with this VSI are affected. */
3217 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3218 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Drop the profile entirely once no VSI references remain. */
3222 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3223 status = ice_flow_rem_prof_sync(hw, blk, p);
3228 ice_release_lock(&hw->fl_profs_locks[blk]);
3234 * ice_rem_rss_list - remove RSS configuration from list
3235 * @hw: pointer to the hardware structure
3236 * @vsi_handle: software VSI handle
3237 * @prof: pointer to flow profile
3239 * Assumption: lock has already been acquired for RSS list
3242 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3244 struct ice_rss_cfg *r, *tmp;
3246 /* Search for RSS hash fields associated to the VSI that match the
3247 * hash configurations associated to the flow profile. If found
3248 * remove from the RSS entry list of the VSI context and delete entry.
3250 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3251 ice_rss_cfg, l_entry)
/* A tracking entry matches when both the hashed fields and the packet
 * headers of the profile's last (innermost) segment agree.
 */
3252 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3253 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3254 ice_clear_bit(vsi_handle, r->vsis);
/* Last VSI gone: unlink the entry from the tracking list. */
3255 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3256 LIST_DEL(&r->l_entry);
3264 * ice_add_rss_list - add RSS configuration to list
3265 * @hw: pointer to the hardware structure
3266 * @vsi_handle: software VSI handle
3267 * @prof: pointer to flow profile
3269 * Assumption: lock has already been acquired for RSS list
3271 static enum ice_status
3272 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3274 struct ice_rss_cfg *r, *rss_cfg;
/* Reuse an existing tracking entry when one already matches the
 * profile's hashed fields and packet headers: just mark this VSI.
 */
3276 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3277 ice_rss_cfg, l_entry)
3278 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3279 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3280 ice_set_bit(vsi_handle, r->vsis);
/* No match: allocate a new tracking entry (caller-side lock held;
 * ICE_ERR_NO_MEMORY on allocation failure).
 */
3284 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3286 return ICE_ERR_NO_MEMORY;
3288 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3289 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3290 rss_cfg->symm = prof->cfg.symm;
3291 ice_set_bit(vsi_handle, rss_cfg->vsis);
3293 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit layout of the 64-bit generated RSS flow profile ID. */
3298 #define ICE_FLOW_PROF_HASH_S 0
3299 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3300 #define ICE_FLOW_PROF_HDR_S 32
/* NOTE(review): 0x3FFFFFFF is a 30-bit mask (bits 32..61), but the
 * format comment below says the header field is [32:62] -- one of the
 * two is off by one bit; confirm against the base-code definition.
 */
3301 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3302 #define ICE_FLOW_PROF_ENCAP_S 63
3303 #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
/* Segment counts used by the add/remove paths: outer-only vs tunneled. */
3305 #define ICE_RSS_OUTER_HEADERS 1
3306 #define ICE_RSS_INNER_HEADERS 2
3308 /* Flow profile ID format:
3309 * [0:31] - Packet match fields
3310 * [32:62] - Protocol header
3311 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
/* segs_cnt > 1 implies a tunneled packet, so the encap bit is set
 * whenever (segs_cnt - 1) is non-zero.
 */
3313 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3314 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3315 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3316 ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
/* Program one byte-wide XOR-pair entry in a GLQF_HSYMM register.
 * Each 32-bit GLQF_HSYMM register holds four byte entries; the entry
 * for field-vector word `src` is overwritten with `dst | 0x80`
 * (0x80 presumably marks the entry valid -- TODO confirm against the
 * hardware register specification).
 */
3319 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3321 u32 s = ((src % 4) << 3); /* byte shift */
3322 u32 v = dst | 0x80; /* value to program */
3323 u8 i = src / 4; /* register index */
/* Read-modify-write only the targeted byte lane of the register. */
3326 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3327 reg = (reg & ~(0xff << s)) | (v << s);
3328 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Program symmetric XOR pairing between two field-vector ranges of
 * `len` words each: each src word is paired with its dst counterpart
 * and vice versa, so the hash becomes order-independent for that pair.
 */
3332 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3335 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3338 for (i = 0; i < len; i++) {
3339 ice_rss_config_xor_word(hw, prof_id,
3340 /* Yes, field vector in GLQF_HSYMM and
3341 * GLQF_HINSET is inversed!
/* Indices are mirrored from the end of the field vector because the
 * hardware numbers GLQF_HSYMM entries in reverse (see note above).
 */
3343 fv_last_word - (src + i),
3344 fv_last_word - (dst + i));
3345 ice_rss_config_xor_word(hw, prof_id,
3346 fv_last_word - (dst + i),
3347 fv_last_word - (src + i));
3352 ice_rss_update_symm(struct ice_hw *hw,
3353 struct ice_flow_prof *prof)
3355 struct ice_prof_map *map;
3358 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3359 prof_id = map->prof_id;
3361 /* clear to default */
3362 for (m = 0; m < 6; m++)
3363 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3364 if (prof->cfg.symm) {
3365 struct ice_flow_seg_info *seg =
3366 &prof->segs[prof->segs_cnt - 1];
3368 struct ice_flow_seg_xtrct *ipv4_src =
3369 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3370 struct ice_flow_seg_xtrct *ipv4_dst =
3371 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3372 struct ice_flow_seg_xtrct *ipv6_src =
3373 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3374 struct ice_flow_seg_xtrct *ipv6_dst =
3375 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3377 struct ice_flow_seg_xtrct *tcp_src =
3378 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3379 struct ice_flow_seg_xtrct *tcp_dst =
3380 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3382 struct ice_flow_seg_xtrct *udp_src =
3383 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3384 struct ice_flow_seg_xtrct *udp_dst =
3385 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3387 struct ice_flow_seg_xtrct *sctp_src =
3388 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3389 struct ice_flow_seg_xtrct *sctp_dst =
3390 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3393 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3394 ice_rss_config_xor(hw, prof_id,
3395 ipv4_src->idx, ipv4_dst->idx, 2);
3398 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3399 ice_rss_config_xor(hw, prof_id,
3400 ipv6_src->idx, ipv6_dst->idx, 8);
3403 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3404 ice_rss_config_xor(hw, prof_id,
3405 tcp_src->idx, tcp_dst->idx, 1);
3408 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3409 ice_rss_config_xor(hw, prof_id,
3410 udp_src->idx, udp_dst->idx, 1);
3413 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3414 ice_rss_config_xor(hw, prof_id,
3415 sctp_src->idx, sctp_dst->idx, 1);
3420 * ice_add_rss_cfg_sync - add an RSS configuration
3421 * @hw: pointer to the hardware structure
3422 * @vsi_handle: software VSI handle
3423 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3424 * @addl_hdrs: protocol header fields
3425 * @segs_cnt: packet segment count
3426 * @symm: symmetric hash enable/disable
3428 * Assumption: lock has already been acquired for RSS list
3430 static enum ice_status
3431 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3432 u32 addl_hdrs, u8 segs_cnt, bool symm)
3434 const enum ice_block blk = ICE_BLK_RSS;
3435 struct ice_flow_prof *prof = NULL;
3436 struct ice_flow_seg_info *segs;
3437 enum ice_status status;
/* Segment count must be within [1, ICE_FLOW_SEG_MAX]. */
3439 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3440 return ICE_ERR_PARAM;
/* Zero-initialized array: one ice_flow_seg_info per packet segment. */
3442 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3445 return ICE_ERR_NO_MEMORY;
3447 /* Construct the packet segment info from the hashed fields */
3448 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3453 /* Search for a flow profile that has matching headers, hash fields
3454 * and has the input VSI associated to it. If found, no further
3455 * operations required and exit.
3457 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3459 ICE_FLOW_FIND_PROF_CHK_FLDS |
3460 ICE_FLOW_FIND_PROF_CHK_VSI);
/* Exact match exists: only the symmetric flag may need refreshing
 * (ice_rss_update_symm reprograms the XOR registers when it changes).
 */
3462 if (prof->cfg.symm == symm)
3464 prof->cfg.symm = symm;
3468 /* Check if a flow profile exists with the same protocol headers and
3469 * associated with the input VSI. If so disassociate the VSI from
3470 * this profile. The VSI will be added to a new profile created with
3471 * the protocol header and new hash field configuration.
3473 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3474 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3476 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
/* Drop the old hash-field tracking entry for this VSI. */
3478 ice_rem_rss_list(hw, vsi_handle, prof);
3482 /* Remove profile if it has no VSIs associated */
3483 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3484 status = ice_flow_rem_prof(hw, blk, prof->id);
3490 /* Search for a profile that has same match fields only. If this
3491 * exists then associate the VSI to this profile.
3493 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3495 ICE_FLOW_FIND_PROF_CHK_FLDS);
/* Reuse is only allowed when the symmetric requirement matches. */
3497 if (prof->cfg.symm == symm) {
3498 status = ice_flow_assoc_prof(hw, blk, prof,
3501 status = ice_add_rss_list(hw, vsi_handle,
3504 /* if a profile exist but with different symmetric
3505 * requirement, just return error.
3507 status = ICE_ERR_NOT_SUPPORTED;
3512 /* Create a new flow profile with generated profile and packet
3513 * segment information.
3515 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3516 ICE_FLOW_GEN_PROFID(hashed_flds,
3517 segs[segs_cnt - 1].hdrs,
3519 segs, segs_cnt, NULL, 0, &prof);
3523 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3524 /* If association to a new flow profile failed then this profile can
/* Roll back: the freshly-created profile is useless without the VSI. */
3528 ice_flow_rem_prof(hw, blk, prof->id);
3532 status = ice_add_rss_list(hw, vsi_handle, prof);
/* Record the requested symmetric mode and program the HW registers. */
3534 prof->cfg.symm = symm;
3537 ice_rss_update_symm(hw, prof);
3545 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3546 * @hw: pointer to the hardware structure
3547 * @vsi_handle: software VSI handle
3548 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3549 * @addl_hdrs: protocol header fields
3550 * @symm: symmetric hash enable/disable
3552 * This function will generate a flow profile based on fields associated with
3553 * the input fields to hash on, the flow type and use the VSI number to add
3554 * a flow entry to the profile.
3557 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3558 u32 addl_hdrs, bool symm)
3560 enum ice_status status;
/* Reject an empty hash bitmap or an invalid software VSI handle. */
3562 if (hashed_flds == ICE_HASH_INVALID ||
3563 !ice_is_vsi_valid(hw, vsi_handle))
3564 return ICE_ERR_PARAM;
/* Configure the outer headers first, then the inner (tunneled)
 * headers, both under the RSS list lock required by the _sync helper.
 */
3566 ice_acquire_lock(&hw->rss_locks);
3567 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3568 ICE_RSS_OUTER_HEADERS, symm);
3570 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3571 addl_hdrs, ICE_RSS_INNER_HEADERS,
3573 ice_release_lock(&hw->rss_locks);
3579 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3580 * @hw: pointer to the hardware structure
3581 * @vsi_handle: software VSI handle
3582 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3583 * @addl_hdrs: Protocol header fields within a packet segment
3584 * @segs_cnt: packet segment count
3586 * Assumption: lock has already been acquired for RSS list
3588 static enum ice_status
3589 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3590 u32 addl_hdrs, u8 segs_cnt)
3592 const enum ice_block blk = ICE_BLK_RSS;
3593 struct ice_flow_seg_info *segs;
3594 struct ice_flow_prof *prof;
3595 enum ice_status status;
3597 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3600 return ICE_ERR_NO_MEMORY;
3602 /* Construct the packet segment info from the hashed fields */
3603 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
/* Look up the profile that matches both headers and hash fields;
 * without a match there is nothing to remove.
 */
3608 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3610 ICE_FLOW_FIND_PROF_CHK_FLDS);
3612 status = ICE_ERR_DOES_NOT_EXIST;
3616 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3620 /* Remove RSS configuration from VSI context before deleting
/* Drop this VSI's tracking entry, then delete the profile itself once
 * no other VSI still references it.
 */
3623 ice_rem_rss_list(hw, vsi_handle, prof);
3625 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3626 status = ice_flow_rem_prof(hw, blk, prof->id);
3634 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3635 * @hw: pointer to the hardware structure
3636 * @vsi_handle: software VSI handle
3637 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3638 * @addl_hdrs: Protocol header fields within a packet segment
3640 * This function will lookup the flow profile based on the input
3641 * hash field bitmap, iterate through the profile entry list of
3642 * that profile and find entry associated with input VSI to be
3643 * removed. Calls are made to underlying flow apis which will in
3644 * turn build or update buffers for RSS XLT1 section.
3647 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3650 enum ice_status status;
/* Reject an empty hash bitmap or an invalid software VSI handle. */
3652 if (hashed_flds == ICE_HASH_INVALID ||
3653 !ice_is_vsi_valid(hw, vsi_handle))
3654 return ICE_ERR_PARAM;
/* Mirror of ice_add_rss_cfg: remove the outer-header configuration,
 * then the inner-header one, under the RSS list lock.
 */
3656 ice_acquire_lock(&hw->rss_locks);
3657 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3658 ICE_RSS_OUTER_HEADERS);
3660 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3661 addl_hdrs, ICE_RSS_INNER_HEADERS);
3662 ice_release_lock(&hw->rss_locks);
3668 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3669 * @hw: pointer to the hardware structure
3670 * @vsi_handle: software VSI handle
3672 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3674 enum ice_status status = ICE_SUCCESS;
3675 struct ice_rss_cfg *r;
3677 if (!ice_is_vsi_valid(hw, vsi_handle))
3678 return ICE_ERR_PARAM;
/* Re-apply every recorded RSS configuration that references this VSI,
 * outer headers first and then inner (tunneled) headers, holding the
 * RSS list lock for the whole walk.
 */
3680 ice_acquire_lock(&hw->rss_locks);
3681 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3682 ice_rss_cfg, l_entry) {
3683 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3684 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3687 ICE_RSS_OUTER_HEADERS,
3691 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3694 ICE_RSS_INNER_HEADERS,
3700 ice_release_lock(&hw->rss_locks);
3706 * ice_get_rss_cfg - returns hashed fields for the given header types
3707 * @hw: pointer to the hardware structure
3708 * @vsi_handle: software VSI handle
3709 * @hdrs: protocol header type
3711 * This function will return the match fields of the first instance of flow
3712 * profile having the given header types and containing input VSI
3714 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3716 struct ice_rss_cfg *r, *rss_cfg = NULL;
3718 /* verify if the protocol header is non zero and VSI is valid */
3719 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3720 return ICE_HASH_INVALID;
/* Scan the tracking list for the first entry that references this VSI
 * and exactly matches the requested packet-header set.
 */
3722 ice_acquire_lock(&hw->rss_locks);
3723 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3724 ice_rss_cfg, l_entry)
3725 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3726 r->packet_hdr == hdrs) {
3730 ice_release_lock(&hw->rss_locks);
/* No match: report ICE_HASH_INVALID rather than a zero field set. */
3732 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;