/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "ice_common.h"
/* Size of known protocol header fields, in bytes */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36 enum ice_flow_seg_hdr hdr;
37 s16 off; /* Offset from start of a protocol header, in bits */
38 u16 size; /* Size of fields in bits */
39 u16 mask; /* 16-bit mask for field */
/* Initializer for an unmasked field descriptor; .mask is implicitly zero
 * because unspecified designated-initializer members are zero-initialized.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
/* Initializer for a field descriptor with an explicit 16-bit match mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
56 /* Table containing properties of supported protocol header fields */
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
60 /* ICE_FLOW_FIELD_IDX_ETH_DA */
61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62 /* ICE_FLOW_FIELD_IDX_ETH_SA */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64 /* ICE_FLOW_FIELD_IDX_S_VLAN */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66 /* ICE_FLOW_FIELD_IDX_C_VLAN */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
71 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
74 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
77 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
116 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
131 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139 /* ICE_FLOW_FIELD_IDX_ARP_OP */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
142 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
147 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
150 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152 ICE_FLOW_FLD_SZ_GTP_TEID),
153 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155 ICE_FLOW_FLD_SZ_GTP_TEID),
156 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158 ICE_FLOW_FLD_SZ_GTP_TEID),
159 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164 ICE_FLOW_FLD_SZ_GTP_TEID),
165 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167 ICE_FLOW_FLD_SZ_GTP_TEID),
169 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
173 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175 ICE_FLOW_FLD_SZ_PFCP_SEID),
177 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
181 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183 ICE_FLOW_FLD_SZ_ESP_SPI),
185 /* ICE_FLOW_FIELD_IDX_AH_SPI */
186 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187 ICE_FLOW_FLD_SZ_AH_SPI),
189 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194 /* Bitmaps indicating relevant packet types for a particular protocol header
196 * Packet types for packets with an Outer/First/Single MAC header
198 static const u32 ice_ptypes_mac_ofos[] = {
199 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
203 0x00000000, 0x00000000, 0x00000000, 0x00000000,
204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213 0x00000000, 0x00000000, 0x00000000, 0x00000000,
214 0x00000000, 0x00000000, 0x00000000, 0x00000000,
215 0x00000000, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 /* Packet types for packets with an Outer/First/Single IPv4 header */
222 static const u32 ice_ptypes_ipv4_ofos[] = {
223 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
224 0x00000000, 0x00000155, 0x00000000, 0x00000000,
225 0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 /* Packet types for packets with an Innermost/Last IPv4 header */
234 static const u32 ice_ptypes_ipv4_il[] = {
235 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
236 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x00000000, 0x00000000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 /* Packet types for packets with an Outer/First/Single IPv6 header */
246 static const u32 ice_ptypes_ipv6_ofos[] = {
247 0x00000000, 0x00000000, 0x77000000, 0x10002000,
248 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
249 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 /* Packet types for packets with an Innermost/Last IPv6 header */
258 static const u32 ice_ptypes_ipv6_il[] = {
259 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
260 0x00000770, 0x00000000, 0x00000000, 0x00000000,
261 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
262 0x00000000, 0x00000000, 0x00000000, 0x00000000,
263 0x00000000, 0x00000000, 0x00000000, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 /* Packet types for packets with an Outermost/First ARP header */
270 static const u32 ice_ptypes_arp_of[] = {
271 0x00000800, 0x00000000, 0x00000000, 0x00000000,
272 0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 0x00000000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 /* UDP Packet types for non-tunneled packets or tunneled
282 * packets with inner UDP.
284 static const u32 ice_ptypes_udp_il[] = {
285 0x81000000, 0x20204040, 0x04000010, 0x80810102,
286 0x00000040, 0x00000000, 0x00000000, 0x00000000,
287 0x00000000, 0x00410000, 0x90842000, 0x00000007,
288 0x00000000, 0x00000000, 0x00000000, 0x00000000,
289 0x00000000, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 /* Packet types for packets with an Innermost/Last TCP header */
296 static const u32 ice_ptypes_tcp_il[] = {
297 0x04000000, 0x80810102, 0x10000040, 0x02040408,
298 0x00000102, 0x00000000, 0x00000000, 0x00000000,
299 0x00000000, 0x00820000, 0x21084000, 0x00000000,
300 0x00000000, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 /* Packet types for packets with an Innermost/Last SCTP header */
308 static const u32 ice_ptypes_sctp_il[] = {
309 0x08000000, 0x01020204, 0x20000081, 0x04080810,
310 0x00000204, 0x00000000, 0x00000000, 0x00000000,
311 0x00000000, 0x01040000, 0x00000000, 0x00000000,
312 0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 0x00000000, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x00000000, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 /* Packet types for packets with an Outermost/First ICMP header */
320 static const u32 ice_ptypes_icmp_of[] = {
321 0x10000000, 0x00000000, 0x00000000, 0x00000000,
322 0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 0x00000000, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 /* Packet types for packets with an Innermost/Last ICMP header */
332 static const u32 ice_ptypes_icmp_il[] = {
333 0x00000000, 0x02040408, 0x40000102, 0x08101020,
334 0x00000408, 0x00000000, 0x00000000, 0x00000000,
335 0x00000000, 0x00000000, 0x42108000, 0x00000000,
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 /* Packet types for packets with an Outermost/First GRE header */
344 static const u32 ice_ptypes_gre_of[] = {
345 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
346 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
347 0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000000, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 /* Packet types for packets with an Innermost/Last MAC header */
356 static const u32 ice_ptypes_mac_il[] = {
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000000, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 /* Packet types for GTPC */
368 static const u32 ice_ptypes_gtpc[] = {
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000180, 0x00000000,
372 0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 0x00000000, 0x00000000, 0x00000000, 0x00000000,
375 0x00000000, 0x00000000, 0x00000000, 0x00000000,
376 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 /* Packet types for GTPC with TEID */
380 static const u32 ice_ptypes_gtpc_tid[] = {
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000060, 0x00000000,
384 0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 0x00000000, 0x00000000, 0x00000000, 0x00000000,
386 0x00000000, 0x00000000, 0x00000000, 0x00000000,
387 0x00000000, 0x00000000, 0x00000000, 0x00000000,
388 0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 /* Packet types for GTPU */
392 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
393 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
394 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
395 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
396 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
397 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
398 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
399 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
400 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
401 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
402 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
403 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
404 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
405 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
406 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
407 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
408 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
409 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
410 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
411 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
412 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
415 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
416 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
417 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
418 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
419 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
420 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
421 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
422 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
423 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
424 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
425 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
426 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
427 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
428 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
429 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
430 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
431 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
432 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
433 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
434 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
435 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
438 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
439 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
440 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
441 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
442 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
443 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
444 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
445 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
446 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
447 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
448 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
449 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
450 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
451 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
452 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
453 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
454 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
455 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
456 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
457 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
458 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
461 static const u32 ice_ptypes_gtpu[] = {
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 /* Packet types for pppoe */
473 static const u32 ice_ptypes_pppoe[] = {
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 /* Packet types for packets with PFCP NODE header */
485 static const u32 ice_ptypes_pfcp_node[] = {
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x80000000, 0x00000002,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 /* Packet types for packets with PFCP SESSION header */
497 static const u32 ice_ptypes_pfcp_session[] = {
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000000, 0x00000005,
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000000, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 /* Packet types for l2tpv3 */
509 static const u32 ice_ptypes_l2tpv3[] = {
510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 0x00000000, 0x00000000, 0x00000000, 0x00000000,
512 0x00000000, 0x00000000, 0x00000000, 0x00000300,
513 0x00000000, 0x00000000, 0x00000000, 0x00000000,
514 0x00000000, 0x00000000, 0x00000000, 0x00000000,
515 0x00000000, 0x00000000, 0x00000000, 0x00000000,
516 0x00000000, 0x00000000, 0x00000000, 0x00000000,
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
520 /* Packet types for esp */
521 static const u32 ice_ptypes_esp[] = {
522 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 0x00000000, 0x00000003, 0x00000000, 0x00000000,
524 0x00000000, 0x00000000, 0x00000000, 0x00000000,
525 0x00000000, 0x00000000, 0x00000000, 0x00000000,
526 0x00000000, 0x00000000, 0x00000000, 0x00000000,
527 0x00000000, 0x00000000, 0x00000000, 0x00000000,
528 0x00000000, 0x00000000, 0x00000000, 0x00000000,
529 0x00000000, 0x00000000, 0x00000000, 0x00000000,
532 /* Packet types for ah */
533 static const u32 ice_ptypes_ah[] = {
534 0x00000000, 0x00000000, 0x00000000, 0x00000000,
535 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
536 0x00000000, 0x00000000, 0x00000000, 0x00000000,
537 0x00000000, 0x00000000, 0x00000000, 0x00000000,
538 0x00000000, 0x00000000, 0x00000000, 0x00000000,
539 0x00000000, 0x00000000, 0x00000000, 0x00000000,
540 0x00000000, 0x00000000, 0x00000000, 0x00000000,
541 0x00000000, 0x00000000, 0x00000000, 0x00000000,
544 /* Packet types for packets with NAT_T ESP header */
545 static const u32 ice_ptypes_nat_t_esp[] = {
546 0x00000000, 0x00000000, 0x00000000, 0x00000000,
547 0x00000000, 0x00000030, 0x00000000, 0x00000000,
548 0x00000000, 0x00000000, 0x00000000, 0x00000000,
549 0x00000000, 0x00000000, 0x00000000, 0x00000000,
550 0x00000000, 0x00000000, 0x00000000, 0x00000000,
551 0x00000000, 0x00000000, 0x00000000, 0x00000000,
552 0x00000000, 0x00000000, 0x00000000, 0x00000000,
553 0x00000000, 0x00000000, 0x00000000, 0x00000000,
556 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
557 0x00000846, 0x00000000, 0x00000000, 0x00000000,
558 0x00000000, 0x00000000, 0x00000000, 0x00000000,
559 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
560 0x00000000, 0x00000000, 0x00000000, 0x00000000,
561 0x00000000, 0x00000000, 0x00000000, 0x00000000,
562 0x00000000, 0x00000000, 0x00000000, 0x00000000,
563 0x00000000, 0x00000000, 0x00000000, 0x00000000,
564 0x00000000, 0x00000000, 0x00000000, 0x00000000,
567 /* Manage parameters and info. used during the creation of a flow profile */
568 struct ice_flow_prof_params {
570 u16 entry_length; /* # of bytes formatted entry will require */
572 struct ice_flow_prof *prof;
574 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
575 * This will give us the direction flags.
577 struct ice_fv_word es[ICE_MAX_FV_WORDS];
578 /* attributes can be used to add attributes to a particular PTYPE */
579 const struct ice_ptype_attributes *attr;
582 u16 mask[ICE_MAX_FV_WORDS];
583 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Header bits that, for RSS, apply to the inner packet segment */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* Groupings of header bits by protocol layer */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
603 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
604 * @segs: array of one or more packet segments that describe the flow
605 * @segs_cnt: number of packet segments provided
607 static enum ice_status
608 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
612 for (i = 0; i < segs_cnt; i++) {
613 /* Multiple L3 headers */
614 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
615 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
616 return ICE_ERR_PARAM;
618 /* Multiple L4 headers */
619 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
620 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
621 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
639 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
640 * @params: information about the flow to be processed
641 * @seg: index of packet segment whose header size is to be determined
643 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
648 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
649 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
652 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
653 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
654 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
655 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
656 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
657 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
658 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
659 /* A L3 header is required if L4 is specified */
663 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
664 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
665 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
666 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
667 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
668 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
669 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
670 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
676 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
677 * @params: information about the flow to be processed
679 * This function identifies the packet types associated with the protocol
680 * headers being present in packet segments of the specified flow profile.
682 static enum ice_status
683 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
685 struct ice_flow_prof *prof;
688 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
693 for (i = 0; i < params->prof->segs_cnt; i++) {
694 const ice_bitmap_t *src;
697 hdrs = prof->segs[i].hdrs;
699 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
700 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
701 (const ice_bitmap_t *)ice_ptypes_mac_il;
702 ice_and_bitmap(params->ptypes, params->ptypes, src,
706 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
707 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
708 ice_and_bitmap(params->ptypes, params->ptypes, src,
712 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
713 ice_and_bitmap(params->ptypes, params->ptypes,
714 (const ice_bitmap_t *)ice_ptypes_arp_of,
718 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
719 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
720 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
721 ice_and_bitmap(params->ptypes, params->ptypes, src,
723 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
724 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
725 ice_and_bitmap(params->ptypes,
728 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
729 ice_and_bitmap(params->ptypes, params->ptypes,
730 (const ice_bitmap_t *)
733 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
734 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
735 ice_and_bitmap(params->ptypes, params->ptypes,
736 src, ICE_FLOW_PTYPE_MAX);
738 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
739 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
740 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
741 ice_and_bitmap(params->ptypes, params->ptypes, src,
743 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
744 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
745 ice_and_bitmap(params->ptypes,
748 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
749 ice_and_bitmap(params->ptypes, params->ptypes,
750 (const ice_bitmap_t *)
753 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
754 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
755 ice_and_bitmap(params->ptypes, params->ptypes,
756 src, ICE_FLOW_PTYPE_MAX);
760 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
761 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
762 ice_and_bitmap(params->ptypes, params->ptypes,
763 src, ICE_FLOW_PTYPE_MAX);
764 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
765 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
766 ice_and_bitmap(params->ptypes, params->ptypes, src,
770 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
771 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
772 (const ice_bitmap_t *)ice_ptypes_icmp_il;
773 ice_and_bitmap(params->ptypes, params->ptypes, src,
775 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
777 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
778 ice_and_bitmap(params->ptypes, params->ptypes,
779 src, ICE_FLOW_PTYPE_MAX);
781 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
782 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
783 ice_and_bitmap(params->ptypes, params->ptypes,
784 src, ICE_FLOW_PTYPE_MAX);
785 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
786 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
787 ice_and_bitmap(params->ptypes, params->ptypes,
788 src, ICE_FLOW_PTYPE_MAX);
789 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
790 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
791 ice_and_bitmap(params->ptypes, params->ptypes,
792 src, ICE_FLOW_PTYPE_MAX);
794 /* Attributes for GTP packet with downlink */
795 params->attr = ice_attr_gtpu_down;
796 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
797 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
798 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
799 ice_and_bitmap(params->ptypes, params->ptypes,
800 src, ICE_FLOW_PTYPE_MAX);
802 /* Attributes for GTP packet with uplink */
803 params->attr = ice_attr_gtpu_up;
804 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
805 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
806 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
807 ice_and_bitmap(params->ptypes, params->ptypes,
808 src, ICE_FLOW_PTYPE_MAX);
810 /* Attributes for GTP packet with Extension Header */
811 params->attr = ice_attr_gtpu_eh;
812 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
813 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
814 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
815 ice_and_bitmap(params->ptypes, params->ptypes,
816 src, ICE_FLOW_PTYPE_MAX);
817 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
818 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
819 ice_and_bitmap(params->ptypes, params->ptypes,
820 src, ICE_FLOW_PTYPE_MAX);
821 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
822 src = (const ice_bitmap_t *)ice_ptypes_esp;
823 ice_and_bitmap(params->ptypes, params->ptypes,
824 src, ICE_FLOW_PTYPE_MAX);
825 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
826 src = (const ice_bitmap_t *)ice_ptypes_ah;
827 ice_and_bitmap(params->ptypes, params->ptypes,
828 src, ICE_FLOW_PTYPE_MAX);
829 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
830 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
831 ice_and_bitmap(params->ptypes, params->ptypes,
832 src, ICE_FLOW_PTYPE_MAX);
835 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
836 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
838 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
841 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
843 ice_and_bitmap(params->ptypes, params->ptypes,
844 src, ICE_FLOW_PTYPE_MAX);
846 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
847 ice_andnot_bitmap(params->ptypes, params->ptypes,
848 src, ICE_FLOW_PTYPE_MAX);
850 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
851 ice_andnot_bitmap(params->ptypes, params->ptypes,
852 src, ICE_FLOW_PTYPE_MAX);
860 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
861 * @hw: pointer to the HW struct
862 * @params: information about the flow to be processed
863 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
865 * This function will allocate extraction sequence entries for a DWORD size
866 * chunk of the packet flags.
868 static enum ice_status
869 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
870 struct ice_flow_prof_params *params,
871 enum ice_flex_mdid_pkt_flags flags)
873 u8 fv_words = hw->blk[params->blk].es.fvw;
876 /* Make sure the number of extraction sequence entries required does not
877 * exceed the block's capacity.
879 if (params->es_cnt >= fv_words)
880 return ICE_ERR_MAX_LIMIT;
882 /* some blocks require a reversed field vector layout */
883 if (hw->blk[params->blk].es.reverse)
884 idx = fv_words - params->es_cnt - 1;
886 idx = params->es_cnt;
/* Packet flags live in Rx/Tx metadata, so they are extracted through the
 * META_ID pseudo-protocol; the MDID flag selector doubles as the offset.
 */
888 params->es[idx].prot_id = ICE_PROT_META_ID;
889 params->es[idx].off = flags;
896 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
897 * @hw: pointer to the HW struct
898 * @params: information about the flow to be processed
899 * @seg: packet segment index of the field to be extracted
900 * @fld: ID of field to be extracted
901 * @match: bitfield of all fields
903 * This function determines the protocol ID, offset, and size of the given
904 * field. It then allocates one or more extraction sequence entries for the
905 * given field, and fill the entries with protocol ID and offset information.
907 static enum ice_status
908 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
909 u8 seg, enum ice_flow_field fld, u64 match)
911 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
912 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
913 u8 fv_words = hw->blk[params->blk].es.fvw;
914 struct ice_flow_fld_info *flds;
915 u16 cnt, ese_bits, i;
921 flds = params->prof->segs[seg].fields;
/* Map the abstract flow field to the HW protocol ID it is carried in;
 * segment 0 selects the outer-header variant, later segments the inner.
 */
924 case ICE_FLOW_FIELD_IDX_ETH_DA:
925 case ICE_FLOW_FIELD_IDX_ETH_SA:
926 case ICE_FLOW_FIELD_IDX_S_VLAN:
927 case ICE_FLOW_FIELD_IDX_C_VLAN:
928 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
930 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
931 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
933 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
934 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
936 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
937 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
939 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
940 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
941 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
943 /* TTL and PROT share the same extraction seq. entry.
944 * Each is considered a sibling to the other in terms of sharing
945 * the same extraction sequence entry.
947 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
948 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
949 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
950 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
952 /* If the sibling field is also included, that field's
953 * mask needs to be included.
955 if (match & BIT(sib))
956 sib_mask = ice_flds_info[sib].mask;
958 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
959 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
960 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
962 /* TTL and PROT share the same extraction seq. entry.
963 * Each is considered a sibling to the other in terms of sharing
964 * the same extraction sequence entry.
966 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
967 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
968 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
969 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
971 /* If the sibling field is also included, that field's
972 * mask needs to be included.
974 if (match & BIT(sib))
975 sib_mask = ice_flds_info[sib].mask;
977 case ICE_FLOW_FIELD_IDX_IPV4_SA:
978 case ICE_FLOW_FIELD_IDX_IPV4_DA:
979 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
981 case ICE_FLOW_FIELD_IDX_IPV6_SA:
982 case ICE_FLOW_FIELD_IDX_IPV6_DA:
983 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
984 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
985 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
986 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
987 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
988 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
989 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
991 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
992 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
993 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
994 prot_id = ICE_PROT_TCP_IL;
996 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
997 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
998 prot_id = ICE_PROT_UDP_IL_OR_S;
1000 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1001 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1002 prot_id = ICE_PROT_SCTP_IL;
1004 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1005 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1006 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1007 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1008 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1009 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1010 /* GTP is accessed through UDP OF protocol */
1011 prot_id = ICE_PROT_UDP_OF;
1013 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1014 prot_id = ICE_PROT_PPPOE;
1016 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1017 prot_id = ICE_PROT_UDP_IL_OR_S;
1019 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1020 prot_id = ICE_PROT_L2TPV3;
1022 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1023 prot_id = ICE_PROT_ESP_F;
1025 case ICE_FLOW_FIELD_IDX_AH_SPI:
1026 prot_id = ICE_PROT_ESP_2;
1028 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1029 prot_id = ICE_PROT_UDP_IL_OR_S;
1031 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1032 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1033 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1034 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1035 case ICE_FLOW_FIELD_IDX_ARP_OP:
1036 prot_id = ICE_PROT_ARP_OF;
1038 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1039 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1040 /* ICMP type and code share the same extraction seq. entry */
1041 prot_id = (params->prof->segs[seg].hdrs &
1042 ICE_FLOW_SEG_HDR_IPV4) ?
1043 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1044 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1045 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1046 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1048 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1049 prot_id = ICE_PROT_GRE_OF;
1052 return ICE_ERR_NOT_IMPL;
1055 /* Each extraction sequence entry is a word in size, and extracts a
1056 * word-aligned offset from a protocol header.
1058 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1060 flds[fld].xtrct.prot_id = prot_id;
1061 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1062 ICE_FLOW_FV_EXTRACT_SZ;
/* NOTE(review): 'adj' is set earlier in this function; presumably a bit
 * adjustment for fields whose extraction is not word-aligned — confirm
 * against the full source.
 */
1063 flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1064 flds[fld].xtrct.idx = params->es_cnt;
1065 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1067 /* Adjust the next field-entry index after accommodating the number of
1068 * entries this field consumes
1070 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1071 ice_flds_info[fld].size, ese_bits);
1073 /* Fill in the extraction sequence entries needed for this field */
1074 off = flds[fld].xtrct.off;
1075 mask = flds[fld].xtrct.mask;
1076 for (i = 0; i < cnt; i++) {
1077 /* Only consume an extraction sequence entry if there is no
1078 * sibling field associated with this field or the sibling entry
1079 * already extracts the word shared with this field.
1081 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1082 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1083 flds[sib].xtrct.off != off) {
1086 /* Make sure the number of extraction sequence required
1087 * does not exceed the block's capability
1089 if (params->es_cnt >= fv_words)
1090 return ICE_ERR_MAX_LIMIT;
1092 /* some blocks require a reversed field vector layout */
1093 if (hw->blk[params->blk].es.reverse)
1094 idx = fv_words - params->es_cnt - 1;
1096 idx = params->es_cnt;
1098 params->es[idx].prot_id = prot_id;
1099 params->es[idx].off = off;
/* OR in the sibling's mask so a single FV word can serve both fields */
1100 params->mask[idx] = mask | sib_mask;
1104 off += ICE_FLOW_FV_EXTRACT_SZ;
1111 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1112 * @hw: pointer to the HW struct
1113 * @params: information about the flow to be processed
1114 * @seg: index of packet segment whose raw fields are to be extracted
1116 static enum ice_status
1117 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1124 if (!params->prof->segs[seg].raws_cnt)
1127 if (params->prof->segs[seg].raws_cnt >
1128 ARRAY_SIZE(params->prof->segs[seg].raws))
1129 return ICE_ERR_MAX_LIMIT;
1131 /* Offsets within the segment headers are not supported */
1132 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1134 return ICE_ERR_PARAM;
1136 fv_words = hw->blk[params->blk].es.fvw;
1138 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1139 struct ice_flow_seg_fld_raw *raw;
1142 raw = &params->prof->segs[seg].raws[i];
1144 /* Storing extraction information */
/* Raw matches are addressed from the start of the packet, i.e. via the
 * outer MAC protocol, at word-aligned offsets.
 */
1145 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1146 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1147 ICE_FLOW_FV_EXTRACT_SZ;
1148 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1150 raw->info.xtrct.idx = params->es_cnt;
1152 /* Determine the number of field vector entries this raw field
1155 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1156 (raw->info.src.last * BITS_PER_BYTE),
1157 (ICE_FLOW_FV_EXTRACT_SZ *
1159 off = raw->info.xtrct.off;
1160 for (j = 0; j < cnt; j++) {
1163 /* Make sure the number of extraction sequence required
1164 * does not exceed the block's capability
1166 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1167 params->es_cnt >= ICE_MAX_FV_WORDS)
1168 return ICE_ERR_MAX_LIMIT;
1170 /* some blocks require a reversed field vector layout */
1171 if (hw->blk[params->blk].es.reverse)
1172 idx = fv_words - params->es_cnt - 1;
1174 idx = params->es_cnt;
1176 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1177 params->es[idx].off = off;
1179 off += ICE_FLOW_FV_EXTRACT_SZ;
1187 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1188 * @hw: pointer to the HW struct
1189 * @params: information about the flow to be processed
1191 * This function iterates through all matched fields in the given segments, and
1192 * creates an extraction sequence for the fields.
1194 static enum ice_status
1195 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1196 struct ice_flow_prof_params *params)
1198 enum ice_status status = ICE_SUCCESS;
1201 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1204 if (params->blk == ICE_BLK_ACL) {
1205 status = ice_flow_xtract_pkt_flags(hw, params,
1206 ICE_RX_MDID_PKT_FLAGS_15_0);
/* Walk each segment's match bitmap; one extract-field call per set bit */
1211 for (i = 0; i < params->prof->segs_cnt; i++) {
1212 u64 match = params->prof->segs[i].match;
1213 enum ice_flow_field j;
1215 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1216 const u64 bit = BIT_ULL(j);
1219 status = ice_flow_xtract_fld(hw, params, i, j,
1227 /* Process raw matching bytes */
1228 status = ice_flow_xtract_raws(hw, params, i);
1237 * ice_flow_sel_acl_scen - returns the specific scenario
1238 * @hw: pointer to the hardware structure
1239 * @params: information about the flow to be processed
1241 * This function will return the specific scenario based on the
1242 * params passed to it
1244 static enum ice_status
1245 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1247 /* Find the best-fit scenario for the provided match width */
1248 struct ice_acl_scen *cand_scen = NULL, *scen;
1251 return ICE_ERR_DOES_NOT_EXIST;
1253 /* Loop through each scenario and match against the scenario width
1254 * to select the specific scenario
1256 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
/* Best fit = narrowest scenario still wide enough for the entry */
1257 if (scen->eff_width >= params->entry_length &&
1258 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1261 return ICE_ERR_DOES_NOT_EXIST;
1263 params->prof->cfg.scen = cand_scen;
1269 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1270 * @params: information about the flow to be processed
1272 static enum ice_status
1273 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1275 u16 index, i, range_idx = 0;
1277 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1279 for (i = 0; i < params->prof->segs_cnt; i++) {
1280 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1281 u64 match = seg->match;
1284 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1285 struct ice_flow_fld_info *fld;
1286 const u64 bit = BIT_ULL(j);
1291 fld = &seg->fields[j];
1292 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
/* Range-checked fields consume a range-checker slot; all other
 * fields consume byte-selection slots starting at 'index'.
 */
1294 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1295 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1297 /* Range checking only supported for single
1300 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1302 BITS_PER_BYTE * 2) > 1)
1303 return ICE_ERR_PARAM;
1305 /* Ranges must define low and high values */
1306 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1307 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1308 return ICE_ERR_PARAM;
1310 fld->entry.val = range_idx++;
1312 /* Store adjusted byte-length of field for later
1313 * use, taking into account potential
1314 * non-byte-aligned displacement
1316 fld->entry.last = DIVIDE_AND_ROUND_UP
1317 (ice_flds_info[j].size +
1318 (fld->xtrct.disp % BITS_PER_BYTE),
1320 fld->entry.val = index;
1321 index += fld->entry.last;
1327 for (j = 0; j < seg->raws_cnt; j++) {
1328 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1330 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1331 raw->info.entry.val = index;
1332 raw->info.entry.last = raw->info.src.last;
1333 index += raw->info.entry.last;
1337 /* Currently only support using the byte selection base, which only
1338 * allows for an effective entry size of 30 bytes. Reject anything
1341 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1342 return ICE_ERR_PARAM;
1344 /* Only 8 range checkers per profile, reject anything trying to use
1347 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1348 return ICE_ERR_PARAM;
1350 /* Store # bytes required for entry for later use */
1351 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1357 * ice_flow_proc_segs - process all packet segments associated with a profile
1358 * @hw: pointer to the HW struct
1359 * @params: information about the flow to be processed
1361 static enum ice_status
1362 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1364 enum ice_status status;
1366 status = ice_flow_proc_seg_hdrs(params);
1370 status = ice_flow_create_xtrct_seq(hw, params);
/* Block-specific post-processing: ACL additionally needs an entry
 * format and a scenario; unsupported blocks return NOT_IMPL.
 */
1374 switch (params->blk) {
1377 status = ICE_SUCCESS;
1380 status = ice_flow_acl_def_entry_frmt(params);
1383 status = ice_flow_sel_acl_scen(hw, params);
1389 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds() */
1395 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1396 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1397 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1400 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1401 * @hw: pointer to the HW struct
1402 * @blk: classification stage
1403 * @dir: flow direction
1404 * @segs: array of one or more packet segments that describe the flow
1405 * @segs_cnt: number of packet segments provided
1406 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1407 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1409 static struct ice_flow_prof *
1410 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1411 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1412 u8 segs_cnt, u16 vsi_handle, u32 conds)
1414 struct ice_flow_prof *p, *prof = NULL;
/* Search under the per-block profile-list lock; released before return */
1416 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1417 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1418 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1419 segs_cnt && segs_cnt == p->segs_cnt) {
1422 /* Check for profile-VSI association if specified */
1423 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1424 ice_is_vsi_valid(hw, vsi_handle) &&
1425 !ice_is_bit_set(p->vsis, vsi_handle))
1428 /* Protocol headers must be checked. Matched fields are
1429 * checked if specified.
1431 for (i = 0; i < segs_cnt; i++)
1432 if (segs[i].hdrs != p->segs[i].hdrs ||
1433 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1434 segs[i].match != p->segs[i].match))
1437 /* A match is found if all segments are matched */
1438 if (i == segs_cnt) {
1443 ice_release_lock(&hw->fl_profs_locks[blk]);
1449 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1450 * @hw: pointer to the HW struct
1451 * @blk: classification stage
1452 * @dir: flow direction
1453 * @segs: array of one or more packet segments that describe the flow
1454 * @segs_cnt: number of packet segments provided
1457 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1458 struct ice_flow_seg_info *segs, u8 segs_cnt)
1460 struct ice_flow_prof *p;
/* Convenience wrapper: match headers AND fields, ignore VSI association */
1462 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1463 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1465 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1469 * ice_flow_find_prof_id - Look up a profile with given profile ID
1470 * @hw: pointer to the HW struct
1471 * @blk: classification stage
1472 * @prof_id: unique ID to identify this flow profile
1474 static struct ice_flow_prof *
1475 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1477 struct ice_flow_prof *p;
/* NOTE(review): the list is walked without taking fl_profs_locks[blk];
 * presumably the caller holds it — confirm at call sites.
 */
1479 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1480 if (p->id == prof_id)
1487 * ice_dealloc_flow_entry - Deallocate flow entry memory
1488 * @hw: pointer to the HW struct
1489 * @entry: flow entry to be removed
1492 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
/* Free each owned sub-buffer first, then the entry object itself */
1498 ice_free(hw, entry->entry);
1500 if (entry->range_buf) {
1501 ice_free(hw, entry->range_buf);
1502 entry->range_buf = NULL;
1506 ice_free(hw, entry->acts);
1508 entry->acts_cnt = 0;
1511 ice_free(hw, entry);
/* Sentinel scenario number meaning "no scenario assigned" for a PF slot */
1514 #define ICE_ACL_INVALID_SCEN 0x3f
1517 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1518 * @hw: pointer to the hardware structure
1519 * @prof: pointer to flow profile
1520 * @buf: destination buffer function writes partial extraction sequence to
1522 * returns ICE_SUCCESS if no PF is associated to the given profile
1523 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1524 * returns other error code for real error
1526 static enum ice_status
1527 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1528 struct ice_aqc_acl_prof_generic_frmt *buf)
1530 enum ice_status status;
1533 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1537 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1541 /* If all PF's associated scenarios are all 0 or all
1542 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1543 * not been configured yet.
1545 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1546 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1547 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1548 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1551 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1552 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1553 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1554 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1555 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1556 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1557 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1558 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* Any mixed state means at least one PF still uses this profile */
1561 return ICE_ERR_IN_USE;
1565 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1566 * @hw: pointer to the hardware structure
1567 * @acts: array of actions to be performed on a match
1568 * @acts_cnt: number of actions
1570 static enum ice_status
1571 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
/* Release any HW counters referenced by counter-type actions */
1576 for (i = 0; i < acts_cnt; i++) {
1577 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1578 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1579 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1580 struct ice_acl_cntrs cntrs;
1581 enum ice_status status;
1583 cntrs.bank = 0; /* Only bank0 for the moment */
1585 LE16_TO_CPU(acts[i].data.acl_act.value);
1587 LE16_TO_CPU(acts[i].data.acl_act.value);
/* A pkt+bytes counter occupies a dual-type HW counter */
1589 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1590 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1592 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1594 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1603 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1604 * @hw: pointer to the hardware structure
1605 * @prof: pointer to flow profile
1607 * Disassociate the scenario from the profile for the PF of the VSI.
1609 static enum ice_status
1610 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1612 struct ice_aqc_acl_prof_generic_frmt buf;
1613 enum ice_status status = ICE_SUCCESS;
1616 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
/* Read-modify-write: fetch the current profile format, invalidate only
 * this PF's scenario slot, then program it back.
 */
1618 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1622 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1626 /* Clear scenario for this PF */
1627 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1628 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1634 * ice_flow_rem_entry_sync - Remove a flow entry
1635 * @hw: pointer to the HW struct
1636 * @blk: classification stage
1637 * @entry: flow entry to be removed
1639 static enum ice_status
1640 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1641 struct ice_flow_entry *entry)
1644 return ICE_ERR_BAD_PTR;
/* ACL entries also live in a HW scenario and may own HW counters;
 * tear those down before unlinking and freeing the SW entry.
 */
1646 if (blk == ICE_BLK_ACL) {
1647 enum ice_status status;
1650 return ICE_ERR_BAD_PTR;
1652 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1653 entry->scen_entry_idx);
1657 /* Checks if we need to release an ACL counter. */
1658 if (entry->acts_cnt && entry->acts)
1659 ice_flow_acl_free_act_cntr(hw, entry->acts,
1663 LIST_DEL(&entry->l_entry);
1665 ice_dealloc_flow_entry(hw, entry);
1671 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1672 * @hw: pointer to the HW struct
1673 * @blk: classification stage
1674 * @dir: flow direction
1675 * @prof_id: unique ID to identify this flow profile
1676 * @segs: array of one or more packet segments that describe the flow
1677 * @segs_cnt: number of packet segments provided
1678 * @acts: array of default actions
1679 * @acts_cnt: number of default actions
1680 * @prof: stores the returned flow profile added
1682 * Assumption: the caller has acquired the lock to the profile list
1684 static enum ice_status
1685 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1686 enum ice_flow_dir dir, u64 prof_id,
1687 struct ice_flow_seg_info *segs, u8 segs_cnt,
1688 struct ice_flow_action *acts, u8 acts_cnt,
1689 struct ice_flow_prof **prof)
1691 struct ice_flow_prof_params params;
1692 enum ice_status status;
1695 if (!prof || (acts_cnt && !acts))
1696 return ICE_ERR_BAD_PTR;
1698 ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1699 params.prof = (struct ice_flow_prof *)
1700 ice_malloc(hw, sizeof(*params.prof));
1702 return ICE_ERR_NO_MEMORY;
1704 /* initialize extraction sequence to all invalid (0xff) */
1705 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1706 params.es[i].prot_id = ICE_PROT_INVALID;
1707 params.es[i].off = ICE_FV_OFFSET_INVAL;
1711 params.prof->id = prof_id;
1712 params.prof->dir = dir;
1713 params.prof->segs_cnt = segs_cnt;
1715 /* Make a copy of the segments that need to be persistent in the flow
1718 for (i = 0; i < segs_cnt; i++)
1719 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1720 ICE_NONDMA_TO_NONDMA)
1722 /* Make a copy of the actions that need to be persistent in the flow
1726 params.prof->acts = (struct ice_flow_action *)
1727 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1728 ICE_NONDMA_TO_NONDMA);
1730 if (!params.prof->acts) {
1731 status = ICE_ERR_NO_MEMORY;
/* Build the extraction sequence from the segment descriptions */
1736 status = ice_flow_proc_segs(hw, &params);
1738 ice_debug(hw, ICE_DBG_FLOW,
1739 "Error processing a flow's packet segments\n");
1743 /* Add a HW profile for this flow profile */
1744 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1745 params.attr, params.attr_cnt, params.es,
1748 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1752 INIT_LIST_HEAD(&params.prof->entries);
1753 ice_init_lock(&params.prof->entries_lock);
1754 *prof = params.prof;
/* Error path: release the action copy and the profile itself */
1758 if (params.prof->acts)
1759 ice_free(hw, params.prof->acts);
1760 ice_free(hw, params.prof);
1767 * ice_flow_rem_prof_sync - remove a flow profile
1768 * @hw: pointer to the hardware structure
1769 * @blk: classification stage
1770 * @prof: pointer to flow profile to remove
1772 * Assumption: the caller has acquired the lock to the profile list
1774 static enum ice_status
1775 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1776 struct ice_flow_prof *prof)
1778 enum ice_status status;
1780 /* Remove all remaining flow entries before removing the flow profile */
1781 if (!LIST_EMPTY(&prof->entries)) {
1782 struct ice_flow_entry *e, *t;
1784 ice_acquire_lock(&prof->entries_lock);
1786 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1788 status = ice_flow_rem_entry_sync(hw, blk, e);
1793 ice_release_lock(&prof->entries_lock);
/* ACL needs extra HW teardown: drop this PF's scenario binding and,
 * if no PF still uses the profile, clear its range checkers.
 */
1796 if (blk == ICE_BLK_ACL) {
1797 struct ice_aqc_acl_profile_ranges query_rng_buf;
1798 struct ice_aqc_acl_prof_generic_frmt buf;
1801 /* Disassociate the scenario from the profile for the PF */
1802 status = ice_flow_acl_disassoc_scen(hw, prof);
1806 /* Clear the range-checker if the profile ID is no longer
1809 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1810 if (status && status != ICE_ERR_IN_USE) {
1812 } else if (!status) {
1813 /* Clear the range-checker value for profile ID */
1814 ice_memset(&query_rng_buf, 0,
1815 sizeof(struct ice_aqc_acl_profile_ranges),
1818 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1823 status = ice_prog_acl_prof_ranges(hw, prof_id,
1824 &query_rng_buf, NULL);
1830 /* Remove all hardware profiles associated with this flow profile */
1831 status = ice_rem_prof(hw, blk, prof->id);
1833 LIST_DEL(&prof->l_entry);
1834 ice_destroy_lock(&prof->entries_lock);
1836 ice_free(hw, prof->acts);
1844 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1845 * @buf: Destination buffer function writes partial xtrct sequence to
1846 * @info: Info about field
1849 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1850 struct ice_flow_fld_info *info)
/* src = byte position of the field within the extracted field vector */
1855 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1856 info->xtrct.disp / BITS_PER_BYTE;
1857 dst = info->entry.val;
1858 for (i = 0; i < info->entry.last; i++)
1859 /* HW stores field vector words in LE, convert words back to BE
1860 * so constructed entries will end up in network order
1862 buf->byte_selection[dst++] = src++ ^ 1;
1866 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1867 * @hw: pointer to the hardware structure
1868 * @prof: pointer to flow profile
1870 static enum ice_status
1871 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1873 struct ice_aqc_acl_prof_generic_frmt buf;
1874 struct ice_flow_fld_info *info;
1875 enum ice_status status;
1879 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1881 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* ICE_ERR_IN_USE means another PF already programmed the profile-wide
 * configuration; only a hard error is propagated here.
 */
1885 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1886 if (status && status != ICE_ERR_IN_USE)
1890 /* Program the profile dependent configuration. This is done
1891 * only once regardless of the number of PFs using that profile
1893 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1895 for (i = 0; i < prof->segs_cnt; i++) {
1896 struct ice_flow_seg_info *seg = &prof->segs[i];
1897 u64 match = seg->match;
1900 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1901 const u64 bit = BIT_ULL(j);
1906 info = &seg->fields[j];
/* Range-checked fields go through word_selection; everything
 * else gets a byte-selection extraction sequence entry.
 */
1908 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1909 buf.word_selection[info->entry.val] =
1912 ice_flow_acl_set_xtrct_seq_fld(&buf,
1918 for (j = 0; j < seg->raws_cnt; j++) {
1919 info = &seg->raws[j].info;
1920 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
1924 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1925 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1929 /* Update the current PF */
1930 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1931 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1937 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1938 * @hw: pointer to the hardware structure
1939 * @blk: classification stage
1940 * @vsi_handle: software VSI handle
1941 * @vsig: target VSI group
1943 * Assumption: the caller has already verified that the VSI to
1944 * be added has the same characteristics as the VSIG and will
1945 * thereby have access to all resources added to that VSIG.
1948 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1951 enum ice_status status;
1953 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1954 return ICE_ERR_PARAM;
/* The SW VSI handle is translated to the HW VSI number for the add */
1956 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1957 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1959 ice_release_lock(&hw->fl_profs_locks[blk]);
1965 * ice_flow_assoc_prof - associate a VSI with a flow profile
1966 * @hw: pointer to the hardware structure
1967 * @blk: classification stage
1968 * @prof: pointer to flow profile
1969 * @vsi_handle: software VSI handle
1971 * Assumption: the caller has acquired the lock to the profile list
1972 * and the software VSI handle has been validated
1974 static enum ice_status
1975 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1976 struct ice_flow_prof *prof, u16 vsi_handle)
1978 enum ice_status status = ICE_SUCCESS;
/* No-op if this VSI is already tracked in the profile's VSI bitmap */
1980 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1981 if (blk == ICE_BLK_ACL) {
1982 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1986 status = ice_add_prof_id_flow(hw, blk,
1987 ice_get_hw_vsi_num(hw,
/* Mirror the successful HW association in the SW bitmap */
1991 ice_set_bit(vsi_handle, prof->vsis);
1993 ice_debug(hw, ICE_DBG_FLOW,
1994 "HW profile add failed, %d\n",
2002 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2003 * @hw: pointer to the hardware structure
2004 * @blk: classification stage
2005 * @prof: pointer to flow profile
2006 * @vsi_handle: software VSI handle
2008 * Assumption: the caller has acquired the lock to the profile list
2009 * and the software VSI handle has been validated
2011 static enum ice_status
2012 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2013 struct ice_flow_prof *prof, u16 vsi_handle)
2015 enum ice_status status = ICE_SUCCESS;
/* Mirror image of ice_flow_assoc_prof(): only touch HW if the VSI is
 * actually tracked, and clear the SW bitmap bit only on HW success.
 */
2017 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2018 status = ice_rem_prof_id_flow(hw, blk,
2019 ice_get_hw_vsi_num(hw,
2023 ice_clear_bit(vsi_handle, prof->vsis);
2025 ice_debug(hw, ICE_DBG_FLOW,
2026 "HW profile remove failed, %d\n",
2034 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2035 * @hw: pointer to the HW struct
2036 * @blk: classification stage
2037 * @dir: flow direction
2038 * @prof_id: unique ID to identify this flow profile
2039 * @segs: array of one or more packet segments that describe the flow
2040 * @segs_cnt: number of packet segments provided
2041 * @acts: array of default actions
2042 * @acts_cnt: number of default actions
2043 * @prof: stores the returned flow profile added
2046 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2047 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2048 struct ice_flow_action *acts, u8 acts_cnt,
2049 struct ice_flow_prof **prof)
2051 enum ice_status status;
/* Parameter validation happens before any locking */
2053 if (segs_cnt > ICE_FLOW_SEG_MAX)
2054 return ICE_ERR_MAX_LIMIT;
2057 return ICE_ERR_PARAM;
2060 return ICE_ERR_BAD_PTR;
/* Verify the protocol-header layering of the segments is legal */
2062 status = ice_flow_val_hdrs(segs, segs_cnt);
2066 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Create the profile under the lock, then link it into the per-block
 * profile list so lookups (e.g. ice_flow_find_prof_id) can see it
 */
2068 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2069 acts, acts_cnt, prof);
2071 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2073 ice_release_lock(&hw->fl_profs_locks[blk]);
2079 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2080 * @hw: pointer to the HW struct
2081 * @blk: the block for which the flow profile is to be removed
2082 * @prof_id: unique ID of the flow profile to be removed
2085 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2087 struct ice_flow_prof *prof;
2088 enum ice_status status;
/* Lookup and removal must both happen under the profile-list lock */
2090 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2092 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2094 status = ICE_ERR_DOES_NOT_EXIST;
2098 /* prof becomes invalid after the call */
2099 status = ice_flow_rem_prof_sync(hw, blk, prof);
2102 ice_release_lock(&hw->fl_profs_locks[blk]);
2108 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2109 * @hw: pointer to the HW struct
2110 * @blk: classification stage
2111 * @prof_id: the profile ID handle
2112 * @hw_prof_id: pointer to variable to receive the HW profile ID
2115 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2118 struct ice_prof_map *map;
/* Resolve the SW profile ID to its HW profile mapping; write the HW ID
 * out only when a mapping exists
 */
2120 map = ice_search_prof_id(hw, blk, prof_id);
2122 *hw_prof_id = map->prof_id;
2126 return ICE_ERR_DOES_NOT_EXIST;
2130 * ice_flow_find_entry - look for a flow entry using its unique ID
2131 * @hw: pointer to the HW struct
2132 * @blk: classification stage
2133 * @entry_id: unique ID to identify this flow entry
2135 * This function looks for the flow entry with the specified unique ID in all
2136 * flow profiles of the specified classification stage. If the entry is found,
2137 * and it returns the handle to the flow entry. Otherwise, it returns
2138 * ICE_FLOW_ENTRY_ID_INVAL.
2140 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2142 struct ice_flow_entry *found = NULL;
2143 struct ice_flow_prof *p;
/* Outer lock protects the profile list; each profile's entry list is
 * additionally guarded by its own entries_lock while scanned
 */
2145 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2147 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2148 struct ice_flow_entry *e;
2150 ice_acquire_lock(&p->entries_lock);
2151 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2152 if (e->id == entry_id) {
2156 ice_release_lock(&p->entries_lock);
2162 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Convert the entry pointer to an opaque handle for the caller */
2164 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2168 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2169 * @hw: pointer to the hardware structure
2170 * @acts: array of actions to be performed on a match
2171 * @acts_cnt: number of actions
2172 * @cnt_alloc: indicates if an ACL counter has been allocated.
2174 static enum ice_status
2175 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2176 u8 acts_cnt, bool *cnt_alloc)
2178 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2181 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2184 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2185 return ICE_ERR_OUT_OF_RANGE;
/* First pass: reject unsupported action types and duplicates.
 * Only NOP, DROP, CNTR_PKT and FWD_QUEUE pass the type filter here.
 */
2187 for (i = 0; i < acts_cnt; i++) {
2188 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2189 acts[i].type != ICE_FLOW_ACT_DROP &&
2190 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2191 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2194 /* If the caller want to add two actions of the same type, then
2195 * it is considered invalid configuration.
2197 if (ice_test_and_set_bit(acts[i].type, dup_check))
2198 return ICE_ERR_PARAM;
2201 /* Checks if ACL counters are needed. */
2202 for (i = 0; i < acts_cnt; i++) {
2203 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2204 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2205 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2206 struct ice_acl_cntrs cntrs;
2207 enum ice_status status;
2210 cntrs.bank = 0; /* Only bank0 for the moment */
/* Packet+byte counting needs a dual counter pair;
 * everything else uses a single counter
 */
2212 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2213 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2215 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2217 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2220 /* Counter index within the bank */
2221 acts[i].data.acl_act.value =
2222 CPU_TO_LE16(cntrs.first_cntr);
2231 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2232 * @fld: number of the given field
2233 * @info: info about field
2234 * @range_buf: range checker configuration buffer
2235 * @data: pointer to a data buffer containing flow entry's match values/masks
2236 * @range: Input/output param indicating which range checkers are being used
2239 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2240 struct ice_aqc_acl_profile_ranges *range_buf,
2241 u8 *data, u8 *range)
2245 /* If not specified, default mask is all bits in field */
2246 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2247 BIT(ice_flds_info[fld].size) - 1 :
2248 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2250 /* If the mask is 0, then we don't need to worry about this input
2251 * range checker value.
/* low/high boundaries are read from the caller's data buffer at the
 * "val"/"last" offsets and shifted by the extraction displacement.
 * NOTE(review): unaligned u16 reads from data + offset — assumes the
 * caller's buffer offsets keep these accesses safe on this platform.
 */
2255 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2257 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2258 u8 range_idx = info->entry.val;
/* Program the selected checker; boundaries and mask are big-endian */
2260 range_buf->checker_cfg[range_idx].low_boundary =
2261 CPU_TO_BE16(new_low);
2262 range_buf->checker_cfg[range_idx].high_boundary =
2263 CPU_TO_BE16(new_high);
2264 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2266 /* Indicate which range checker is being used */
2267 *range |= BIT(range_idx);
2272 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2273 * @fld: number of the given field
2274 * @info: info about the field
2275 * @buf: buffer containing the entry
2276 * @dontcare: buffer containing don't care mask for entry
2277 * @data: pointer to a data buffer containing flow entry's match values/masks
2280 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2281 u8 *dontcare, u8 *data)
2283 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2284 bool use_mask = false;
2287 src = info->src.val;
2288 mask = info->src.mask;
/* Destination index is relative to the ACL byte-selection window */
2289 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* Sub-byte displacement of the extracted field within its first byte */
2290 disp = info->xtrct.disp % BITS_PER_BYTE;
2292 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying shifted overflow bits
 * (tmp_s/tmp_m hold 16 bits so the high byte spills into the next dst)
 */
2295 for (k = 0; k < info->entry.last; k++, dst++) {
2296 /* Add overflow bits from previous byte */
2297 buf[dst] = (tmp_s & 0xff00) >> 8;
2299 /* If mask is not valid, tmp_m is always zero, so just setting
2300 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2301 * overflow bits of mask from prev byte
2303 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2305 /* If there is displacement, last byte will only contain
2306 * displaced data, but there is no more data to read from user
2307 * buffer, so skip so as not to potentially read beyond end of
2310 if (!disp || k < info->entry.last - 1) {
2311 /* Store shifted data to use in next byte */
2312 tmp_s = data[src++] << disp;
2314 /* Add current (shifted) byte */
2315 buf[dst] |= tmp_s & 0xff;
2317 /* Handle mask if valid */
/* dontcare semantics: 1 = bit ignored, so the mask is inverted */
2319 tmp_m = (~data[mask++] & 0xff) << disp;
2320 dontcare[dst] |= tmp_m & 0xff;
2325 /* Fill in don't care bits at beginning of field */
2327 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2328 for (k = 0; k < disp; k++)
2329 dontcare[dst] |= BIT(k);
/* end_disp: bit position just past the field within its last byte */
2332 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2334 /* Fill in don't care bits at end of field */
2336 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2337 info->entry.last - 1;
2338 for (k = end_disp; k < BITS_PER_BYTE; k++)
2339 dontcare[dst] |= BIT(k);
2344 * ice_flow_acl_frmt_entry - Format ACL entry
2345 * @hw: pointer to the hardware structure
2346 * @prof: pointer to flow profile
2347 * @e: pointer to the flow entry
2348 * @data: pointer to a data buffer containing flow entry's match values/masks
2349 * @acts: array of actions to be performed on a match
2350 * @acts_cnt: number of actions
2352 * Formats the key (and key_inverse) to be matched from the data passed in,
2353 * along with data from the flow profile. This key/key_inverse pair makes up
2354 * the 'entry' for an ACL flow entry.
2356 static enum ice_status
2357 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2358 struct ice_flow_entry *e, u8 *data,
2359 struct ice_flow_action *acts, u8 acts_cnt)
2361 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2362 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2363 enum ice_status status;
/* Need the HW profile ID both for actions and to stamp into the key */
2368 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2372 /* Format the result action */
2374 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2378 status = ICE_ERR_NO_MEMORY;
/* Keep a private copy of the caller's actions on the entry */
2380 e->acts = (struct ice_flow_action *)
2381 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2382 ICE_NONDMA_TO_NONDMA);
2387 e->acts_cnt = acts_cnt;
2389 /* Format the matching data */
2390 buf_sz = prof->cfg.scen->width;
2391 buf = (u8 *)ice_malloc(hw, buf_sz);
2395 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2399 /* 'key' buffer will store both key and key_inverse, so must be twice
2402 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2406 range_buf = (struct ice_aqc_acl_profile_ranges *)
2407 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2411 /* Set don't care mask to all 1's to start, will zero out used bytes */
2412 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Walk every matched field in every segment and format it into either
 * a range checker or the key/dontcare buffers
 */
2414 for (i = 0; i < prof->segs_cnt; i++) {
2415 struct ice_flow_seg_info *seg = &prof->segs[i];
2416 u64 match = seg->match;
2419 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2420 struct ice_flow_fld_info *info;
2421 const u64 bit = BIT_ULL(j);
2426 info = &seg->fields[j];
2428 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2429 ice_flow_acl_frmt_entry_range(j, info,
2433 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) fields are copied without bit shifting */
2439 for (j = 0; j < seg->raws_cnt; j++) {
2440 struct ice_flow_fld_info *info = &seg->raws[j].info;
2441 u16 dst, src, mask, k;
2442 bool use_mask = false;
2444 src = info->src.val;
2445 dst = info->entry.val -
2446 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2447 mask = info->src.mask;
2449 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2452 for (k = 0; k < info->entry.last; k++, dst++) {
2453 buf[dst] = data[src++];
2455 dontcare[dst] = ~data[mask++];
/* Stamp the HW profile ID into its dedicated key byte (exact match) */
2462 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2463 dontcare[prof->cfg.scen->pid_idx] = 0;
2465 /* Format the buffer for direction flags */
2466 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2468 if (prof->dir == ICE_FLOW_RX)
2469 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* Range-checker usage byte: set bits must match, unused are dontcare */
2472 buf[prof->cfg.scen->rng_chk_idx] = range;
2473 /* Mark any unused range checkers as don't care */
2474 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2475 e->range_buf = range_buf;
2477 ice_free(hw, range_buf);
/* Build key + key_inverse from buf/dontcare into the doubled buffer */
2480 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2486 e->entry_sz = buf_sz * 2;
2493 ice_free(hw, dontcare);
/* Error unwind: release resources acquired above on failure */
2498 if (status && range_buf) {
2499 ice_free(hw, range_buf);
2500 e->range_buf = NULL;
2503 if (status && e->acts) {
2504 ice_free(hw, e->acts);
2509 if (status && cnt_alloc)
2510 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2516 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2517 * the compared data.
2518 * @prof: pointer to flow profile
2519 * @e: pointer to the comparing flow entry
2520 * @do_chg_action: decide if we want to change the ACL action
2521 * @do_add_entry: decide if we want to add the new ACL entry
2522 * @do_rem_entry: decide if we want to remove the current ACL entry
2524 * Find an ACL scenario entry that matches the compared data. In the same time,
2525 * this function also figure out:
2526 * a/ If we want to change the ACL action
2527 * b/ If we want to add the new ACL entry
2528 * c/ If we want to remove the current ACL entry
2530 static struct ice_flow_entry *
2531 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2532 struct ice_flow_entry *e, bool *do_chg_action,
2533 bool *do_add_entry, bool *do_rem_entry)
2535 struct ice_flow_entry *p, *return_entry = NULL;
2539 * a/ There exists an entry with same matching data, but different
2540 * priority, then we remove this existing ACL entry. Then, we
2541 * will add the new entry to the ACL scenario.
2542 * b/ There exists an entry with same matching data, priority, and
2543 * result action, then we do nothing
2544 * c/ There exists an entry with same matching data, priority, but
2545 * different, action, then do only change the action's entry.
2546 * d/ Else, we add this new entry to the ACL scenario.
/* Defaults correspond to case d/ (brand-new entry) */
2548 *do_chg_action = false;
2549 *do_add_entry = true;
2550 *do_rem_entry = false;
2551 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* Key comparison is a raw memcmp of the formatted entry bytes */
2552 if (memcmp(p->entry, e->entry, p->entry_sz))
2555 /* From this point, we have the same matching_data. */
2556 *do_add_entry = false;
2559 if (p->priority != e->priority) {
2560 /* matching data && !priority */
2561 *do_add_entry = true;
2562 *do_rem_entry = true;
2566 /* From this point, we will have matching_data && priority */
2567 if (p->acts_cnt != e->acts_cnt)
2568 *do_chg_action = true;
/* Actions are order-insensitive: each existing action must have an
 * exact byte-wise twin somewhere in the new action list
 */
2569 for (i = 0; i < p->acts_cnt; i++) {
2570 bool found_not_match = false;
2572 for (j = 0; j < e->acts_cnt; j++)
2573 if (memcmp(&p->acts[i], &e->acts[j],
2574 sizeof(struct ice_flow_action))) {
2575 found_not_match = true;
2579 if (found_not_match) {
2580 *do_chg_action = true;
2585 /* (do_chg_action = true) means :
2586 * matching_data && priority && !result_action
2587 * (do_chg_action = false) means :
2588 * matching_data && priority && result_action
2593 return return_entry;
2597 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
/* Maps the generic flow priority enum onto the ACL-specific priority
 * enum; unknown values fall back to ICE_NORMAL.
 */
2600 static enum ice_acl_entry_prior
2601 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2603 enum ice_acl_entry_prior acl_prior;
2606 case ICE_FLOW_PRIO_LOW:
2607 acl_prior = ICE_LOW;
2609 case ICE_FLOW_PRIO_NORMAL:
2610 acl_prior = ICE_NORMAL;
2612 case ICE_FLOW_PRIO_HIGH:
2613 acl_prior = ICE_HIGH;
2616 acl_prior = ICE_NORMAL;
2624 * ice_flow_acl_union_rng_chk - Perform union operation between two
2625 * range-range checker buffers
2626 * @dst_buf: pointer to destination range checker buffer
2627 * @src_buf: pointer to source range checker buffer
2629 * For this function, we do the union between dst_buf and src_buf
2630 * range checker buffer, and we will save the result back to dst_buf
2632 static enum ice_status
2633 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2634 struct ice_aqc_acl_profile_ranges *src_buf)
2638 if (!dst_buf || !src_buf)
2639 return ICE_ERR_BAD_PTR;
/* For each source checker, find a destination slot that is either
 * free (mask == 0) or already holds identical config, and copy into it
 */
2641 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2642 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2643 bool will_populate = false;
2645 in_data = &src_buf->checker_cfg[i];
2650 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2651 cfg_data = &dst_buf->checker_cfg[j];
/* mask == 0 marks an unused slot; identical config can be reused */
2653 if (!cfg_data->mask ||
2654 !memcmp(cfg_data, in_data,
2655 sizeof(struct ice_acl_rng_data))) {
2656 will_populate = true;
2661 if (will_populate) {
2662 ice_memcpy(cfg_data, in_data,
2663 sizeof(struct ice_acl_rng_data),
2664 ICE_NONDMA_TO_NONDMA);
2666 /* No available slot left to program range checker */
2667 return ICE_ERR_MAX_LIMIT;
2675 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2676 * @hw: pointer to the hardware structure
2677 * @prof: pointer to flow profile
2678 * @entry: double pointer to the flow entry
2680 * For this function, we will look at the current added entries in the
2681 * corresponding ACL scenario. Then, we will perform matching logic to
2682 * see if we want to add/modify/do nothing with this new entry.
2684 static enum ice_status
2685 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2686 struct ice_flow_entry **entry)
2688 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2689 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2690 struct ice_acl_act_entry *acts = NULL;
2691 struct ice_flow_entry *exist;
2692 enum ice_status status = ICE_SUCCESS;
2693 struct ice_flow_entry *e;
2696 if (!entry || !(*entry) || !prof)
2697 return ICE_ERR_BAD_PTR;
2701 do_chg_rng_chk = false;
2705 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2710 /* Query the current range-checker value in FW */
2711 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2715 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2716 sizeof(struct ice_aqc_acl_profile_ranges),
2717 ICE_NONDMA_TO_NONDMA);
2719 /* Generate the new range-checker value */
2720 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2724 /* Reconfigure the range check if the buffer is changed. */
2725 do_chg_rng_chk = false;
2726 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2727 sizeof(struct ice_aqc_acl_profile_ranges))) {
2728 status = ice_prog_acl_prof_ranges(hw, prof_id,
2729 &cfg_rng_buf, NULL);
2733 do_chg_rng_chk = true;
2737 /* Figure out if we want to (change the ACL action) and/or
2738 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2740 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2741 &do_add_entry, &do_rem_entry);
2744 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2749 /* Prepare the result action buffer */
/* NOTE(review): sized by e->entry_sz elements and the allocation
 * result is used without a visible NULL check here — confirm against
 * the elided lines.
 */
2750 acts = (struct ice_acl_act_entry *)ice_calloc
2751 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2752 for (i = 0; i < e->acts_cnt; i++)
2753 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2754 sizeof(struct ice_acl_act_entry),
2755 ICE_NONDMA_TO_NONDMA);
2758 enum ice_acl_entry_prior prior;
/* e->entry holds key followed by key_inverse (each entry_sz/2) */
2762 keys = (u8 *)e->entry;
2763 inverts = keys + (e->entry_sz / 2);
2764 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2766 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2767 inverts, acts, e->acts_cnt,
2772 e->scen_entry_idx = entry_idx;
2773 LIST_ADD(&e->l_entry, &prof->entries);
2775 if (do_chg_action) {
2776 /* For the action memory info, update the SW's copy of
2777 * exist entry with e's action memory info
2779 ice_free(hw, exist->acts);
2780 exist->acts_cnt = e->acts_cnt;
2781 exist->acts = (struct ice_flow_action *)
2782 ice_calloc(hw, exist->acts_cnt,
2783 sizeof(struct ice_flow_action));
2786 status = ICE_ERR_NO_MEMORY;
2790 ice_memcpy(exist->acts, e->acts,
2791 sizeof(struct ice_flow_action) * e->acts_cnt,
2792 ICE_NONDMA_TO_NONDMA);
/* Reprogram only the action memory of the existing scenario slot */
2794 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2796 exist->scen_entry_idx);
2801 if (do_chg_rng_chk) {
2802 /* In this case, we want to update the range checker
2803 * information of the exist entry
2805 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2811 /* As we don't add the new entry to our SW DB, deallocate its
2812 * memories, and return the exist entry to the caller
2814 ice_dealloc_flow_entry(hw, e);
2825 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2826 * @hw: pointer to the hardware structure
2827 * @prof: pointer to flow profile
2828 * @e: double pointer to the flow entry
/* Thin locking wrapper around ice_flow_acl_add_scen_entry_sync(); the
 * profile's entries_lock is held for the duration of the sync call.
 */
2830 static enum ice_status
2831 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2832 struct ice_flow_entry **e)
2834 enum ice_status status;
2836 ice_acquire_lock(&prof->entries_lock);
2837 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2838 ice_release_lock(&prof->entries_lock);
2844 * ice_flow_add_entry - Add a flow entry
2845 * @hw: pointer to the HW struct
2846 * @blk: classification stage
2847 * @prof_id: ID of the profile to add a new flow entry to
2848 * @entry_id: unique ID to identify this flow entry
2849 * @vsi_handle: software VSI handle for the flow entry
2850 * @prio: priority of the flow entry
2851 * @data: pointer to a data buffer containing flow entry's match values/masks
2852 * @acts: arrays of actions to be performed on a match
2853 * @acts_cnt: number of actions
2854 * @entry_h: pointer to buffer that receives the new flow entry's handle
2857 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2858 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2859 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2862 struct ice_flow_entry *e = NULL;
2863 struct ice_flow_prof *prof;
2864 enum ice_status status = ICE_SUCCESS;
2866 /* ACL entries must indicate an action */
2867 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2868 return ICE_ERR_PARAM;
2870 /* No flow entry data is expected for RSS */
2871 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2872 return ICE_ERR_BAD_PTR;
2874 if (!ice_is_vsi_valid(hw, vsi_handle))
2875 return ICE_ERR_PARAM;
2877 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2879 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2881 status = ICE_ERR_DOES_NOT_EXIST;
2883 /* Allocate memory for the entry being added and associate
2884 * the VSI to the found flow profile
2886 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2888 status = ICE_ERR_NO_MEMORY;
2890 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
/* Profile-list lock is dropped before per-block entry formatting */
2893 ice_release_lock(&hw->fl_profs_locks[blk]);
2898 e->vsi_handle = vsi_handle;
2907 /* ACL will handle the entry management */
2908 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
/* add_scen_entry may replace e with a pre-existing identical entry */
2913 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2921 status = ICE_ERR_NOT_IMPL;
2925 if (blk != ICE_BLK_ACL) {
2926 /* ACL will handle the entry management */
2927 ice_acquire_lock(&prof->entries_lock);
2928 LIST_ADD(&e->l_entry, &prof->entries);
2929 ice_release_lock(&prof->entries_lock);
/* Return the new entry as an opaque handle */
2932 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2937 ice_free(hw, e->entry);
2945 * ice_flow_rem_entry - Remove a flow entry
2946 * @hw: pointer to the HW struct
2947 * @blk: classification stage
2948 * @entry_h: handle to the flow entry to be removed
2950 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2953 struct ice_flow_entry *entry;
2954 struct ice_flow_prof *prof;
2955 enum ice_status status = ICE_SUCCESS;
2957 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2958 return ICE_ERR_PARAM;
/* Convert the opaque handle back into an entry pointer */
2960 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2962 /* Retain the pointer to the flow profile as the entry will be freed */
/* Removal runs under the owning profile's entries_lock */
2966 ice_acquire_lock(&prof->entries_lock);
2967 status = ice_flow_rem_entry_sync(hw, blk, entry);
2968 ice_release_lock(&prof->entries_lock);
2975 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2976 * @seg: packet segment the field being set belongs to
2977 * @fld: field to be set
2978 * @field_type: type of the field
2979 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2980 * entry's input buffer
2981 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2983 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2984 * entry's input buffer
2986 * This helper function stores information of a field being matched, including
2987 * the type of the field and the locations of the value to match, the mask, and
2988 * and the upper-bound value in the start of the input buffer for a flow entry.
2989 * This function should only be used for fixed-size data structures.
2991 * This function also opportunistically determines the protocol headers to be
2992 * present based on the fields being set. Some fields cannot be used alone to
2993 * determine the protocol headers present. Sometimes, fields for particular
2994 * protocol headers are not matched. In those cases, the protocol headers
2995 * must be explicitly set.
2998 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2999 enum ice_flow_fld_match_type field_type, u16 val_loc,
3000 u16 mask_loc, u16 last_loc)
3002 u64 bit = BIT_ULL(fld);
3005 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the match type and the three source offsets for this field */
3008 seg->fields[fld].type = field_type;
3009 seg->fields[fld].src.val = val_loc;
3010 seg->fields[fld].src.mask = mask_loc;
3011 seg->fields[fld].src.last = last_loc;
/* Mark the protocol header this field implies as present in the seg */
3013 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3017 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3018 * @seg: packet segment the field being set belongs to
3019 * @fld: field to be set
3020 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3021 * entry's input buffer
3022 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3024 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3025 * entry's input buffer
3026 * @range: indicate if field being matched is to be in a range
3028 * This function specifies the locations, in the form of byte offsets from the
3029 * start of the input buffer for a flow entry, from where the value to match,
3030 * the mask value, and upper value can be extracted. These locations are then
3031 * stored in the flow profile. When adding a flow entry associated with the
3032 * flow profile, these locations will be used to quickly extract the values and
3033 * create the content of a match entry. This function should only be used for
3034 * fixed-size data structures.
3037 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3038 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Convenience wrapper: select REG vs RANGE match type, then delegate */
3040 enum ice_flow_fld_match_type t = range ?
3041 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3043 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3047 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3048 * @seg: packet segment the field being set belongs to
3049 * @fld: field to be set
3050 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3051 * entry's input buffer
3052 * @pref_loc: location of prefix value from entry's input buffer
3053 * @pref_sz: size of the location holding the prefix value
3055 * This function specifies the locations, in the form of byte offsets from the
3056 * start of the input buffer for a flow entry, from where the value to match
3057 * and the IPv4 prefix value can be extracted. These locations are then stored
3058 * in the flow profile. When adding flow entries to the associated flow profile,
3059 * these locations can be used to quickly extract the values to create the
3060 * content of a match entry. This function should only be used for fixed-size
3064 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3065 u16 val_loc, u16 pref_loc, u8 pref_sz)
3067 /* For this type of field, the "mask" location is for the prefix value's
3068 * location and the "last" location is for the size of the location of
3071 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3072 pref_loc, (u16)pref_sz);
3076 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3077 * @seg: packet segment the field being set belongs to
3078 * @off: offset of the raw field from the beginning of the segment in bytes
3079 * @len: length of the raw pattern to be matched
3080 * @val_loc: location of the value to match from entry's input buffer
3081 * @mask_loc: location of mask value from entry's input buffer
3083 * This function specifies the offset of the raw field to be match from the
3084 * beginning of the specified packet segment, and the locations, in the form of
3085 * byte offsets from the start of the input buffer for a flow entry, from where
3086 * the value to match and the mask value to be extracted. These locations are
3087 * then stored in the flow profile. When adding flow entries to the associated
3088 * flow profile, these locations can be used to quickly extract the values to
3089 * create the content of a match entry. This function should only be used for
3090 * fixed-size data structures.
3093 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3094 u16 val_loc, u16 mask_loc)
/* Only record the raw field if a slot remains in the fixed-size array */
3096 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3097 seg->raws[seg->raws_cnt].off = off;
3098 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3099 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3100 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3101 /* The "last" field is used to store the length of the field */
3102 seg->raws[seg->raws_cnt].info.src.last = len;
3105 /* Overflows of "raws" will be handled as an error condition later in
3106 * the flow when this information is processed.
/* Per-layer groupings of segment-header bits that are valid for RSS
 * configuration; VAL_MASKS is the union of all three layers.
 */
3111 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3112 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3114 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3115 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3117 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3118 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3119 ICE_FLOW_SEG_HDR_SCTP)
3121 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3122 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3123 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3124 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3127 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3128 * @segs: pointer to the flow field segment(s)
3129 * @hash_fields: fields to be hashed on for the segment(s)
3130 * @flow_hdr: protocol header fields within a packet segment
3132 * Helper function to extract fields from hash bitmap and use flow
3133 * header value to set flow field segment for further use in flow
3134 * profile entry or removal.
3136 static enum ice_status
3137 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3140 u64 val = hash_fields;
/* Register every field present in the hash bitmap; RSS fields carry no
 * per-entry value/mask/last offsets, hence OFF_INVAL for all three
 */
3143 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3144 u64 bit = BIT_ULL(i);
3147 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3148 ICE_FLOW_FLD_OFF_INVAL,
3149 ICE_FLOW_FLD_OFF_INVAL,
3150 ICE_FLOW_FLD_OFF_INVAL, false);
3154 ICE_FLOW_SET_HDRS(segs, flow_hdr);
/* Reject header bits outside the RSS-supported set */
3156 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3157 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3158 return ICE_ERR_PARAM;
/* At most one L3 and one L4 header type may be selected (power of 2) */
3160 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3161 if (val && !ice_is_pow2(val))
3164 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3165 if (val && !ice_is_pow2(val))
3172 * ice_rem_vsi_rss_list - remove VSI from RSS list
3173 * @hw: pointer to the hardware structure
3174 * @vsi_handle: software VSI handle
3176 * Remove the VSI from all RSS configurations in the list.
3178 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3180 struct ice_rss_cfg *r, *tmp
3182 if (LIST_EMPTY(&hw->rss_list_head))
/* SAFE iteration: entries may be unlinked while walking the list */
3185 ice_acquire_lock(&hw->rss_locks);
3186 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3187 ice_rss_cfg, l_entry)
/* Drop the config entirely once no VSI references it any more */
3188 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3189 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3190 LIST_DEL(&r->l_entry);
3193 ice_release_lock(&hw->rss_locks);
3197 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3198 * @hw: pointer to the hardware structure
3199 * @vsi_handle: software VSI handle
3201 * This function will iterate through all flow profiles and disassociate
3202 * the VSI from that profile. If the flow profile has no VSIs it will
3205 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3207 const enum ice_block blk = ICE_BLK_RSS;
3208 struct ice_flow_prof *p, *t;
3209 enum ice_status status = ICE_SUCCESS;
3211 if (!ice_is_vsi_valid(hw, vsi_handle))
3212 return ICE_ERR_PARAM;
3214 if (LIST_EMPTY(&hw->fl_profs[blk]))
3217 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3218 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3220 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3221 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3225 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3226 status = ice_flow_rem_prof_sync(hw, blk, p);
3231 ice_release_lock(&hw->fl_profs_locks[blk]);
3237 * ice_rem_rss_list - remove RSS configuration from list
3238 * @hw: pointer to the hardware structure
3239 * @vsi_handle: software VSI handle
3240 * @prof: pointer to flow profile
3242 * Assumption: lock has already been acquired for RSS list
3245 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3247 struct ice_rss_cfg *r, *tmp;
3249 /* Search for RSS hash fields associated to the VSI that match the
3250 * hash configurations associated to the flow profile. If found
3251 * remove from the RSS entry list of the VSI context and delete entry.
3253 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3254 ice_rss_cfg, l_entry)
3255 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3256 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3257 ice_clear_bit(vsi_handle, r->vsis);
3258 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3259 LIST_DEL(&r->l_entry);
3267 * ice_add_rss_list - add RSS configuration to list
3268 * @hw: pointer to the hardware structure
3269 * @vsi_handle: software VSI handle
3270 * @prof: pointer to flow profile
3272 * Assumption: lock has already been acquired for RSS list
3274 static enum ice_status
3275 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3277 struct ice_rss_cfg *r, *rss_cfg;
3279 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3280 ice_rss_cfg, l_entry)
3281 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3282 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3283 ice_set_bit(vsi_handle, r->vsis);
3287 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3289 return ICE_ERR_NO_MEMORY;
3291 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3292 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3293 rss_cfg->symm = prof->cfg.symm;
3294 ice_set_bit(vsi_handle, rss_cfg->vsis);
3296 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit layout of the 64-bit generated flow profile ID (format below) */
#define ICE_FLOW_PROF_HASH_S 0
#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S 32
#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S 63
#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts for the *_rss_cfg_sync() calls: 1 = outer headers only,
 * 2 = last segment describes the inner (encapsulated) headers.
 */
#define ICE_RSS_OUTER_HEADERS 1
#define ICE_RSS_INNER_HEADERS 2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3322 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3324 u32 s = ((src % 4) << 3); /* byte shift */
3325 u32 v = dst | 0x80; /* value to program */
3326 u8 i = src / 4; /* register index */
3329 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3330 reg = (reg & ~(0xff << s)) | (v << s);
3331 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3335 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3338 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3341 for (i = 0; i < len; i++) {
3342 ice_rss_config_xor_word(hw, prof_id,
3343 /* Yes, field vector in GLQF_HSYMM and
3344 * GLQF_HINSET is inversed!
3346 fv_last_word - (src + i),
3347 fv_last_word - (dst + i));
3348 ice_rss_config_xor_word(hw, prof_id,
3349 fv_last_word - (dst + i),
3350 fv_last_word - (src + i));
3355 ice_rss_update_symm(struct ice_hw *hw,
3356 struct ice_flow_prof *prof)
3358 struct ice_prof_map *map;
3361 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3362 prof_id = map->prof_id;
3364 /* clear to default */
3365 for (m = 0; m < 6; m++)
3366 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3367 if (prof->cfg.symm) {
3368 struct ice_flow_seg_info *seg =
3369 &prof->segs[prof->segs_cnt - 1];
3371 struct ice_flow_seg_xtrct *ipv4_src =
3372 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3373 struct ice_flow_seg_xtrct *ipv4_dst =
3374 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3375 struct ice_flow_seg_xtrct *ipv6_src =
3376 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3377 struct ice_flow_seg_xtrct *ipv6_dst =
3378 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3380 struct ice_flow_seg_xtrct *tcp_src =
3381 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3382 struct ice_flow_seg_xtrct *tcp_dst =
3383 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3385 struct ice_flow_seg_xtrct *udp_src =
3386 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3387 struct ice_flow_seg_xtrct *udp_dst =
3388 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3390 struct ice_flow_seg_xtrct *sctp_src =
3391 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3392 struct ice_flow_seg_xtrct *sctp_dst =
3393 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3396 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3397 ice_rss_config_xor(hw, prof_id,
3398 ipv4_src->idx, ipv4_dst->idx, 2);
3401 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3402 ice_rss_config_xor(hw, prof_id,
3403 ipv6_src->idx, ipv6_dst->idx, 8);
3406 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3407 ice_rss_config_xor(hw, prof_id,
3408 tcp_src->idx, tcp_dst->idx, 1);
3411 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3412 ice_rss_config_xor(hw, prof_id,
3413 udp_src->idx, udp_dst->idx, 1);
3416 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3417 ice_rss_config_xor(hw, prof_id,
3418 sctp_src->idx, sctp_dst->idx, 1);
3423 * ice_add_rss_cfg_sync - add an RSS configuration
3424 * @hw: pointer to the hardware structure
3425 * @vsi_handle: software VSI handle
3426 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3427 * @addl_hdrs: protocol header fields
3428 * @segs_cnt: packet segment count
3429 * @symm: symmetric hash enable/disable
3431 * Assumption: lock has already been acquired for RSS list
3433 static enum ice_status
3434 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3435 u32 addl_hdrs, u8 segs_cnt, bool symm)
3437 const enum ice_block blk = ICE_BLK_RSS;
3438 struct ice_flow_prof *prof = NULL;
3439 struct ice_flow_seg_info *segs;
3440 enum ice_status status;
3442 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3443 return ICE_ERR_PARAM;
3445 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3448 return ICE_ERR_NO_MEMORY;
3450 /* Construct the packet segment info from the hashed fields */
3451 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3456 /* Search for a flow profile that has matching headers, hash fields
3457 * and has the input VSI associated to it. If found, no further
3458 * operations required and exit.
3460 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3462 ICE_FLOW_FIND_PROF_CHK_FLDS |
3463 ICE_FLOW_FIND_PROF_CHK_VSI);
3465 if (prof->cfg.symm == symm)
3467 prof->cfg.symm = symm;
3471 /* Check if a flow profile exists with the same protocol headers and
3472 * associated with the input VSI. If so disassociate the VSI from
3473 * this profile. The VSI will be added to a new profile created with
3474 * the protocol header and new hash field configuration.
3476 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3477 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3479 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3481 ice_rem_rss_list(hw, vsi_handle, prof);
3485 /* Remove profile if it has no VSIs associated */
3486 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3487 status = ice_flow_rem_prof(hw, blk, prof->id);
3493 /* Search for a profile that has same match fields only. If this
3494 * exists then associate the VSI to this profile.
3496 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3498 ICE_FLOW_FIND_PROF_CHK_FLDS);
3500 if (prof->cfg.symm == symm) {
3501 status = ice_flow_assoc_prof(hw, blk, prof,
3504 status = ice_add_rss_list(hw, vsi_handle,
3507 /* if a profile exist but with different symmetric
3508 * requirement, just return error.
3510 status = ICE_ERR_NOT_SUPPORTED;
3515 /* Create a new flow profile with generated profile and packet
3516 * segment information.
3518 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3519 ICE_FLOW_GEN_PROFID(hashed_flds,
3520 segs[segs_cnt - 1].hdrs,
3522 segs, segs_cnt, NULL, 0, &prof);
3526 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3527 /* If association to a new flow profile failed then this profile can
3531 ice_flow_rem_prof(hw, blk, prof->id);
3535 status = ice_add_rss_list(hw, vsi_handle, prof);
3537 prof->cfg.symm = symm;
3540 ice_rss_update_symm(hw, prof);
3548 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3549 * @hw: pointer to the hardware structure
3550 * @vsi_handle: software VSI handle
3551 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3552 * @addl_hdrs: protocol header fields
3553 * @symm: symmetric hash enable/disable
3555 * This function will generate a flow profile based on fields associated with
3556 * the input fields to hash on, the flow type and use the VSI number to add
3557 * a flow entry to the profile.
3560 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3561 u32 addl_hdrs, bool symm)
3563 enum ice_status status;
3565 if (hashed_flds == ICE_HASH_INVALID ||
3566 !ice_is_vsi_valid(hw, vsi_handle))
3567 return ICE_ERR_PARAM;
3569 ice_acquire_lock(&hw->rss_locks);
3570 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3571 ICE_RSS_OUTER_HEADERS, symm);
3573 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3574 addl_hdrs, ICE_RSS_INNER_HEADERS,
3576 ice_release_lock(&hw->rss_locks);
3582 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3583 * @hw: pointer to the hardware structure
3584 * @vsi_handle: software VSI handle
3585 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3586 * @addl_hdrs: Protocol header fields within a packet segment
3587 * @segs_cnt: packet segment count
3589 * Assumption: lock has already been acquired for RSS list
3591 static enum ice_status
3592 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3593 u32 addl_hdrs, u8 segs_cnt)
3595 const enum ice_block blk = ICE_BLK_RSS;
3596 struct ice_flow_seg_info *segs;
3597 struct ice_flow_prof *prof;
3598 enum ice_status status;
3600 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3603 return ICE_ERR_NO_MEMORY;
3605 /* Construct the packet segment info from the hashed fields */
3606 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3611 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3613 ICE_FLOW_FIND_PROF_CHK_FLDS);
3615 status = ICE_ERR_DOES_NOT_EXIST;
3619 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3623 /* Remove RSS configuration from VSI context before deleting
3626 ice_rem_rss_list(hw, vsi_handle, prof);
3628 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3629 status = ice_flow_rem_prof(hw, blk, prof->id);
3637 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3638 * @hw: pointer to the hardware structure
3639 * @vsi_handle: software VSI handle
3640 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3641 * @addl_hdrs: Protocol header fields within a packet segment
3643 * This function will lookup the flow profile based on the input
3644 * hash field bitmap, iterate through the profile entry list of
3645 * that profile and find entry associated with input VSI to be
3646 * removed. Calls are made to underlying flow apis which will in
3647 * turn build or update buffers for RSS XLT1 section.
3650 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3653 enum ice_status status;
3655 if (hashed_flds == ICE_HASH_INVALID ||
3656 !ice_is_vsi_valid(hw, vsi_handle))
3657 return ICE_ERR_PARAM;
3659 ice_acquire_lock(&hw->rss_locks);
3660 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3661 ICE_RSS_OUTER_HEADERS);
3663 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3664 addl_hdrs, ICE_RSS_INNER_HEADERS);
3665 ice_release_lock(&hw->rss_locks);
3671 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3672 * @hw: pointer to the hardware structure
3673 * @vsi_handle: software VSI handle
3675 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3677 enum ice_status status = ICE_SUCCESS;
3678 struct ice_rss_cfg *r;
3680 if (!ice_is_vsi_valid(hw, vsi_handle))
3681 return ICE_ERR_PARAM;
3683 ice_acquire_lock(&hw->rss_locks);
3684 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3685 ice_rss_cfg, l_entry) {
3686 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3687 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3690 ICE_RSS_OUTER_HEADERS,
3694 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3697 ICE_RSS_INNER_HEADERS,
3703 ice_release_lock(&hw->rss_locks);
3709 * ice_get_rss_cfg - returns hashed fields for the given header types
3710 * @hw: pointer to the hardware structure
3711 * @vsi_handle: software VSI handle
3712 * @hdrs: protocol header type
3714 * This function will return the match fields of the first instance of flow
3715 * profile having the given header types and containing input VSI
3717 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3719 struct ice_rss_cfg *r, *rss_cfg = NULL;
3721 /* verify if the protocol header is non zero and VSI is valid */
3722 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3723 return ICE_HASH_INVALID;
3725 ice_acquire_lock(&hw->rss_locks);
3726 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3727 ice_rss_cfg, l_entry)
3728 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3729 r->packet_hdr == hdrs) {
3733 ice_release_lock(&hw->rss_locks);
3735 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;