1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_common.h"
8 /* Size of known protocol header fields */
/* All sizes below are in BYTES; ICE_FLOW_FLD_INFO() converts them to bits
 * when populating struct ice_flow_field_info.
 */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2
10 #define ICE_FLOW_FLD_SZ_VLAN 2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP 1
17 #define ICE_FLOW_FLD_SZ_IP_TTL 1
18 #define ICE_FLOW_FLD_SZ_IP_PROT 1
19 #define ICE_FLOW_FLD_SZ_PORT 2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER 2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID 4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI 2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI 4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36 enum ice_flow_seg_hdr hdr; /* Protocol header this field belongs to */
37 s16 off; /* Offset from start of a protocol header, in bits */
38 u16 size; /* Size of fields in bits */
39 u16 mask; /* 16-bit mask for field */
/* Initializer for struct ice_flow_field_info: callers pass BYTE offsets and
 * sizes, which are stored internally as bits (no mask).
 */
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
44 .off = (_offset_bytes) * BITS_PER_BYTE, \
45 .size = (_size_bytes) * BITS_PER_BYTE, \
/* Same as ICE_FLOW_FLD_INFO() but also records a 16-bit mask, for fields that
 * occupy only part of a 16-bit word (e.g. DSCP, TTL/PROT, GTP-U QFI).
 */
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
51 .off = (_offset_bytes) * BITS_PER_BYTE, \
52 .size = (_size_bytes) * BITS_PER_BYTE, \
56 /* Table containing properties of supported protocol header fields */
/* Entries are indexed by ICE_FLOW_FIELD_IDX_* (named in the per-entry tags
 * below) and must stay in the same order as that enum.
 */
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
60 /* ICE_FLOW_FIELD_IDX_ETH_DA */
61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62 /* ICE_FLOW_FIELD_IDX_ETH_SA */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64 /* ICE_FLOW_FIELD_IDX_S_VLAN */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66 /* ICE_FLOW_FIELD_IDX_C_VLAN */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
71 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
74 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
/* TTL and PROT below share one 16-bit word (note the identical byte offsets);
 * the mask selects which byte of that word the field occupies.
 */
77 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
/* The PRE32/PRE48/PRE64 entries match only an address prefix: same byte
 * offsets as the full SA/DA entries, smaller sizes.
 */
97 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
116 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
131 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139 /* ICE_FLOW_FIELD_IDX_ARP_OP */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
142 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
147 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
150 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152 ICE_FLOW_FLD_SZ_GTP_TEID),
153 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155 ICE_FLOW_FLD_SZ_GTP_TEID),
156 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158 ICE_FLOW_FLD_SZ_GTP_TEID),
159 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
/* QFI is a 6-bit field inside a 16-bit word; 0x3f00 masks those bits */
160 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164 ICE_FLOW_FLD_SZ_GTP_TEID),
165 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167 ICE_FLOW_FLD_SZ_GTP_TEID),
169 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
173 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175 ICE_FLOW_FLD_SZ_PFCP_SEID),
177 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
181 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183 ICE_FLOW_FLD_SZ_ESP_SPI),
185 /* ICE_FLOW_FIELD_IDX_AH_SPI */
186 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187 ICE_FLOW_FLD_SZ_AH_SPI),
189 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194 /* Bitmaps indicating relevant packet types for a particular protocol header
 * NOTE(review): each table below is consumed as an ice_bitmap_t of
 * ICE_FLOW_PTYPE_MAX bits (see ice_flow_proc_seg_hdrs), i.e. bit N of u32
 * word W covers ptype (W * 32 + N). Values are hardware-defined; do not
 * edit by hand without the ptype matrix for this device.
196 * Packet types for packets with an Outer/First/Single MAC header
198 static const u32 ice_ptypes_mac_ofos[] = {
199 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
203 0x00000000, 0x00000000, 0x00000000, 0x00000000,
204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213 0x00000000, 0x00000000, 0x00000000, 0x00000000,
214 0x00000000, 0x00000000, 0x00000000, 0x00000000,
215 0x00000000, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 /* Packet types for packets with an Outer/First/Single IPv4 header */
222 static const u32 ice_ptypes_ipv4_ofos[] = {
223 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
224 0x00000000, 0x00000155, 0x00000000, 0x00000000,
225 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 /* Packet types for packets with an Innermost/Last IPv4 header */
234 static const u32 ice_ptypes_ipv4_il[] = {
235 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
236 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x00000000, 0x00000000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 /* Packet types for packets with an Outer/First/Single IPv6 header */
246 static const u32 ice_ptypes_ipv6_ofos[] = {
247 0x00000000, 0x00000000, 0x77000000, 0x10002000,
248 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
249 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 /* Packet types for packets with an Innermost/Last IPv6 header */
258 static const u32 ice_ptypes_ipv6_il[] = {
259 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
260 0x00000770, 0x00000000, 0x00000000, 0x00000000,
261 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
262 0x00000000, 0x00000000, 0x00000000, 0x00000000,
263 0x00000000, 0x00000000, 0x00000000, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 /* Packet types for packets with an Outermost/First ARP header */
270 static const u32 ice_ptypes_arp_of[] = {
271 0x00000800, 0x00000000, 0x00000000, 0x00000000,
272 0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 0x00000000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 /* UDP Packet types for non-tunneled packets or tunneled
282 * packets with inner UDP.
 * (Same dense ptype-bitmap layout as the tables above; the L4/ICMP/GRE
 * tables below are ANDed into params->ptypes by ice_flow_proc_seg_hdrs.)
284 static const u32 ice_ptypes_udp_il[] = {
285 0x81000000, 0x20204040, 0x04000010, 0x80810102,
286 0x00000040, 0x00000000, 0x00000000, 0x00000000,
287 0x00000000, 0x00410000, 0x90842000, 0x00000007,
288 0x00000000, 0x00000000, 0x00000000, 0x00000000,
289 0x00000000, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 /* Packet types for packets with an Innermost/Last TCP header */
296 static const u32 ice_ptypes_tcp_il[] = {
297 0x04000000, 0x80810102, 0x10000040, 0x02040408,
298 0x00000102, 0x00000000, 0x00000000, 0x00000000,
299 0x00000000, 0x00820000, 0x21084000, 0x00000000,
300 0x00000000, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 /* Packet types for packets with an Innermost/Last SCTP header */
308 static const u32 ice_ptypes_sctp_il[] = {
309 0x08000000, 0x01020204, 0x20000081, 0x04080810,
310 0x00000204, 0x00000000, 0x00000000, 0x00000000,
311 0x00000000, 0x01040000, 0x00000000, 0x00000000,
312 0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 0x00000000, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x00000000, 0x00000000,
315 0x00000000, 0x00000000, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 /* Packet types for packets with an Outermost/First ICMP header */
320 static const u32 ice_ptypes_icmp_of[] = {
321 0x10000000, 0x00000000, 0x00000000, 0x00000000,
322 0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 0x00000000, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 /* Packet types for packets with an Innermost/Last ICMP header */
332 static const u32 ice_ptypes_icmp_il[] = {
333 0x00000000, 0x02040408, 0x40000102, 0x08101020,
334 0x00000408, 0x00000000, 0x00000000, 0x00000000,
335 0x00000000, 0x00000000, 0x42108000, 0x00000000,
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 0x00000000, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 /* Packet types for packets with an Outermost/First GRE header */
344 static const u32 ice_ptypes_gre_of[] = {
345 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
346 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
347 0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000000, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 /* Packet types for packets with an Innermost/Last MAC header */
/* NOTE(review): currently all-zero — no ptypes carry an innermost MAC header
 * in this table; kept so the ETH_OFOS/IL selection logic stays uniform.
 */
356 static const u32 ice_ptypes_mac_il[] = {
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000000, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 /* Packet types for GTPC */
/* Same dense ptype-bitmap layout as the tables above */
368 static const u32 ice_ptypes_gtpc[] = {
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000180, 0x00000000,
372 0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 0x00000000, 0x00000000, 0x00000000, 0x00000000,
375 0x00000000, 0x00000000, 0x00000000, 0x00000000,
376 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 /* Packet types for GTPC with TEID */
380 static const u32 ice_ptypes_gtpc_tid[] = {
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000060, 0x00000000,
384 0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 0x00000000, 0x00000000, 0x00000000, 0x00000000,
386 0x00000000, 0x00000000, 0x00000000, 0x00000000,
387 0x00000000, 0x00000000, 0x00000000, 0x00000000,
388 0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 /* Packet types for GTPU */
/* The three tables below attach a per-ptype attribute (PDU extension header,
 * downlink, or uplink) to the same set of GTP-U ptypes; they are installed
 * into params->attr / params->attr_cnt by ice_flow_proc_seg_hdrs depending on
 * which ICE_FLOW_SEG_HDR_GTPU_* flag is set.
 */
392 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
393 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
394 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
395 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
396 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
397 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
398 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
399 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
400 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
401 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
402 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
403 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
404 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
405 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
406 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
407 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
408 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
409 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
410 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
411 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
412 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
/* Same ptype list as ice_attr_gtpu_eh, tagged as downlink traffic */
415 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
416 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
417 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
418 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
419 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
420 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
421 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
422 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
423 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
424 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
425 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
426 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
427 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
428 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
429 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
430 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
431 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
432 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
433 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
434 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
435 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
/* Same ptype list as ice_attr_gtpu_eh, tagged as uplink traffic */
438 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
439 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
440 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
441 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
442 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
443 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
444 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
445 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
446 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
447 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
448 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
449 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
450 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
451 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
452 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
453 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
454 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
455 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
456 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
457 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
458 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
/* Ptype bitmap shared by all GTP-U segment-header flavors (DWN/UP/EH/IP);
 * the attribute tables above differentiate the flavors.
 */
461 static const u32 ice_ptypes_gtpu[] = {
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 /* Packet types for pppoe */
473 static const u32 ice_ptypes_pppoe[] = {
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 /* Packet types for packets with PFCP NODE header */
485 static const u32 ice_ptypes_pfcp_node[] = {
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x80000000, 0x00000002,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 /* Packet types for packets with PFCP SESSION header */
497 static const u32 ice_ptypes_pfcp_session[] = {
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000000, 0x00000005,
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000000, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 /* Packet types for l2tpv3 */
509 static const u32 ice_ptypes_l2tpv3[] = {
510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 0x00000000, 0x00000000, 0x00000000, 0x00000000,
512 0x00000000, 0x00000000, 0x00000000, 0x00000300,
513 0x00000000, 0x00000000, 0x00000000, 0x00000000,
514 0x00000000, 0x00000000, 0x00000000, 0x00000000,
515 0x00000000, 0x00000000, 0x00000000, 0x00000000,
516 0x00000000, 0x00000000, 0x00000000, 0x00000000,
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
520 /* Packet types for esp */
521 static const u32 ice_ptypes_esp[] = {
522 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 0x00000000, 0x00000003, 0x00000000, 0x00000000,
524 0x00000000, 0x00000000, 0x00000000, 0x00000000,
525 0x00000000, 0x00000000, 0x00000000, 0x00000000,
526 0x00000000, 0x00000000, 0x00000000, 0x00000000,
527 0x00000000, 0x00000000, 0x00000000, 0x00000000,
528 0x00000000, 0x00000000, 0x00000000, 0x00000000,
529 0x00000000, 0x00000000, 0x00000000, 0x00000000,
532 /* Packet types for ah */
533 static const u32 ice_ptypes_ah[] = {
534 0x00000000, 0x00000000, 0x00000000, 0x00000000,
535 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
536 0x00000000, 0x00000000, 0x00000000, 0x00000000,
537 0x00000000, 0x00000000, 0x00000000, 0x00000000,
538 0x00000000, 0x00000000, 0x00000000, 0x00000000,
539 0x00000000, 0x00000000, 0x00000000, 0x00000000,
540 0x00000000, 0x00000000, 0x00000000, 0x00000000,
541 0x00000000, 0x00000000, 0x00000000, 0x00000000,
544 /* Packet types for packets with NAT_T ESP header */
545 static const u32 ice_ptypes_nat_t_esp[] = {
546 0x00000000, 0x00000000, 0x00000000, 0x00000000,
547 0x00000000, 0x00000030, 0x00000000, 0x00000000,
548 0x00000000, 0x00000000, 0x00000000, 0x00000000,
549 0x00000000, 0x00000000, 0x00000000, 0x00000000,
550 0x00000000, 0x00000000, 0x00000000, 0x00000000,
551 0x00000000, 0x00000000, 0x00000000, 0x00000000,
552 0x00000000, 0x00000000, 0x00000000, 0x00000000,
553 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for MAC frames that carry no IP payload (outer/first/single) */
556 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
557 0x00000846, 0x00000000, 0x00000000, 0x00000000,
558 0x00000000, 0x00000000, 0x00000000, 0x00000000,
559 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
560 0x00000000, 0x00000000, 0x00000000, 0x00000000,
561 0x00000000, 0x00000000, 0x00000000, 0x00000000,
562 0x00000000, 0x00000000, 0x00000000, 0x00000000,
563 0x00000000, 0x00000000, 0x00000000, 0x00000000,
564 0x00000000, 0x00000000, 0x00000000, 0x00000000,
567 /* Manage parameters and info. used during the creation of a flow profile */
568 struct ice_flow_prof_params {
570 u16 entry_length; /* # of bytes formatted entry will require */
572 struct ice_flow_prof *prof; /* profile being built; segs[] read below */
574 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
575 * This will give us the direction flags.
577 struct ice_fv_word es[ICE_MAX_FV_WORDS];
578 /* attributes can be used to add attributes to a particular PTYPE */
579 const struct ice_ptype_attributes *attr;
582 u16 mask[ICE_MAX_FV_WORDS]; /* one mask per extraction-sequence (FV) word */
583 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); /* ptypes matching the profile's headers */
/* Header flags that only make sense on the inner-most segment for RSS */
586 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
587 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
588 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
589 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
590 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
591 ICE_FLOW_SEG_HDR_NAT_T_ESP)
/* Per-layer groupings; ice_flow_val_hdrs() uses the L3/L4 masks to reject
 * segments that request more than one header of the same layer.
 */
593 #define ICE_FLOW_SEG_HDRS_L2_MASK \
594 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
595 #define ICE_FLOW_SEG_HDRS_L3_MASK \
596 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
597 ICE_FLOW_SEG_HDR_ARP)
598 #define ICE_FLOW_SEG_HDRS_L4_MASK \
599 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
600 ICE_FLOW_SEG_HDR_SCTP)
603 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
604 * @segs: array of one or more packet segments that describe the flow
605 * @segs_cnt: number of packet segments provided
/* Returns ICE_ERR_PARAM if any segment requests more than one L3 or more
 * than one L4 header (the pow2 test checks that at most one mask bit is set).
 */
607 static enum ice_status
608 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
612 for (i = 0; i < segs_cnt; i++) {
613 /* Multiple L3 headers (IPv4/IPv6/ARP) in one segment are invalid */
614 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
615 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
616 return ICE_ERR_PARAM;
618 /* Multiple L4 headers (ICMP/TCP/UDP/SCTP) in one segment are invalid */
619 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
620 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
621 return ICE_ERR_PARAM;
627 /* Sizes of fixed known protocol headers without header options */
/* Byte sizes used by ice_flow_calc_seg_sz() to estimate a segment's length */
628 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
629 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
630 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
631 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
632 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
633 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
634 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
635 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
636 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
639 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
640 * @params: information about the flow to be processed
641 * @seg: index of packet segment whose header size is to be determined
 *
 * Sums the option-less sizes (ICE_FLOW_PROT_HDR_SZ_*) of the L2, L3 and L4
 * headers selected in params->prof->segs[seg].hdrs.
643 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2: a VLAN tag extends the base MAC header size */
648 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
649 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3: at most one of IPv4/IPv6/ARP (enforced by ice_flow_val_hdrs) */
652 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
653 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
654 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
655 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
656 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
657 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
658 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
659 /* A L3 header is required if L4 is specified */
/* L4: at most one of ICMP/TCP/UDP/SCTP (enforced by ice_flow_val_hdrs) */
663 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
664 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
665 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
666 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
667 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
668 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
669 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
670 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
676 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
677 * @params: information about the flow to be processed
679 * This function identifies the packet types associated with the protocol
680 * headers being present in packet segments of the specified flow profile.
682 static enum ice_status
683 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
685 struct ice_flow_prof *prof;
688 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
693 for (i = 0; i < params->prof->segs_cnt; i++) {
694 const ice_bitmap_t *src;
697 hdrs = prof->segs[i].hdrs;
699 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
700 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
701 (const ice_bitmap_t *)ice_ptypes_mac_il;
702 ice_and_bitmap(params->ptypes, params->ptypes, src,
706 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
707 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
708 ice_and_bitmap(params->ptypes, params->ptypes, src,
712 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
713 ice_and_bitmap(params->ptypes, params->ptypes,
714 (const ice_bitmap_t *)ice_ptypes_arp_of,
718 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
719 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
720 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
721 ice_and_bitmap(params->ptypes, params->ptypes, src,
723 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
724 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
725 ice_and_bitmap(params->ptypes,
728 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
729 ice_and_bitmap(params->ptypes, params->ptypes,
730 (const ice_bitmap_t *)
733 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
734 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
735 ice_and_bitmap(params->ptypes, params->ptypes,
736 src, ICE_FLOW_PTYPE_MAX);
738 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
739 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
740 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
741 ice_and_bitmap(params->ptypes, params->ptypes, src,
743 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
744 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
745 ice_and_bitmap(params->ptypes,
748 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
749 ice_and_bitmap(params->ptypes, params->ptypes,
750 (const ice_bitmap_t *)
753 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
754 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
755 ice_and_bitmap(params->ptypes, params->ptypes,
756 src, ICE_FLOW_PTYPE_MAX);
760 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
761 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
762 ice_and_bitmap(params->ptypes, params->ptypes,
763 src, ICE_FLOW_PTYPE_MAX);
764 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
765 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
766 ice_and_bitmap(params->ptypes, params->ptypes, src,
769 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
770 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
774 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
775 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
776 (const ice_bitmap_t *)ice_ptypes_icmp_il;
777 ice_and_bitmap(params->ptypes, params->ptypes, src,
779 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
781 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
782 ice_and_bitmap(params->ptypes, params->ptypes,
783 src, ICE_FLOW_PTYPE_MAX);
785 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
786 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
787 ice_and_bitmap(params->ptypes, params->ptypes,
788 src, ICE_FLOW_PTYPE_MAX);
789 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
790 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
791 ice_and_bitmap(params->ptypes, params->ptypes,
792 src, ICE_FLOW_PTYPE_MAX);
793 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
794 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
795 ice_and_bitmap(params->ptypes, params->ptypes,
796 src, ICE_FLOW_PTYPE_MAX);
798 /* Attributes for GTP packet with downlink */
799 params->attr = ice_attr_gtpu_down;
800 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
801 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
802 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
803 ice_and_bitmap(params->ptypes, params->ptypes,
804 src, ICE_FLOW_PTYPE_MAX);
806 /* Attributes for GTP packet with uplink */
807 params->attr = ice_attr_gtpu_up;
808 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
809 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
810 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
811 ice_and_bitmap(params->ptypes, params->ptypes,
812 src, ICE_FLOW_PTYPE_MAX);
814 /* Attributes for GTP packet with Extension Header */
815 params->attr = ice_attr_gtpu_eh;
816 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
817 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
818 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
819 ice_and_bitmap(params->ptypes, params->ptypes,
820 src, ICE_FLOW_PTYPE_MAX);
821 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
822 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
823 ice_and_bitmap(params->ptypes, params->ptypes,
824 src, ICE_FLOW_PTYPE_MAX);
825 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
826 src = (const ice_bitmap_t *)ice_ptypes_esp;
827 ice_and_bitmap(params->ptypes, params->ptypes,
828 src, ICE_FLOW_PTYPE_MAX);
829 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
830 src = (const ice_bitmap_t *)ice_ptypes_ah;
831 ice_and_bitmap(params->ptypes, params->ptypes,
832 src, ICE_FLOW_PTYPE_MAX);
833 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
834 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
835 ice_and_bitmap(params->ptypes, params->ptypes,
836 src, ICE_FLOW_PTYPE_MAX);
839 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
840 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
842 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
845 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
847 ice_and_bitmap(params->ptypes, params->ptypes,
848 src, ICE_FLOW_PTYPE_MAX);
850 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
851 ice_andnot_bitmap(params->ptypes, params->ptypes,
852 src, ICE_FLOW_PTYPE_MAX);
854 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
855 ice_andnot_bitmap(params->ptypes, params->ptypes,
856 src, ICE_FLOW_PTYPE_MAX);
864 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
865 * @hw: pointer to the HW struct
866 * @params: information about the flow to be processed
867 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
869 * This function will allocate an extraction sequence entries for a DWORD size
870 * chunk of the packet flags.
872 static enum ice_status
873 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
874 struct ice_flow_prof_params *params,
875 enum ice_flex_mdid_pkt_flags flags)
/* NOTE(review): this listing is gapped (the embedded source line numbers
 * jump). The declaration of 'idx' and the trailing es_cnt increment /
 * return statement are not visible here -- verify against the full file
 * before making any code change.
 */
877 u8 fv_words = hw->blk[params->blk].es.fvw;
880 /* Make sure the number of extraction sequence entries required does not
881 * exceed the block's capacity.
883 if (params->es_cnt >= fv_words)
884 return ICE_ERR_MAX_LIMIT;
886 /* some blocks require a reversed field vector layout */
887 if (hw->blk[params->blk].es.reverse)
888 idx = fv_words - params->es_cnt - 1;
890 idx = params->es_cnt;
/* Extract the packet-flags metadata word: metadata protocol ID with the
 * flags enum value used directly as the word offset. */
892 params->es[idx].prot_id = ICE_PROT_META_ID;
893 params->es[idx].off = flags;
900 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
901 * @hw: pointer to the HW struct
902 * @params: information about the flow to be processed
903 * @seg: packet segment index of the field to be extracted
904 * @fld: ID of field to be extracted
905 * @match: bitfield of all fields
907 * This function determines the protocol ID, offset, and size of the given
908 * field. It then allocates one or more extraction sequence entries for the
909 * given field, and fill the entries with protocol ID and offset information.
911 static enum ice_status
912 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
913 u8 seg, enum ice_flow_field fld, u64 match)
/* NOTE(review): gapped listing -- the switch(fld) opener, per-case 'break'
 * statements, several local declarations (off, mask, sib_mask, idx) and the
 * final return are not visible; do not restructure from this view alone.
 */
915 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
916 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
917 u8 fv_words = hw->blk[params->blk].es.fvw;
918 struct ice_flow_fld_info *flds;
919 u16 cnt, ese_bits, i;
924 flds = params->prof->segs[seg].fields;
/* Map each flow field to its hardware protocol ID. seg == 0 selects the
 * outer/first header variant; otherwise the inner-layer variant is used. */
927 case ICE_FLOW_FIELD_IDX_ETH_DA:
928 case ICE_FLOW_FIELD_IDX_ETH_SA:
929 case ICE_FLOW_FIELD_IDX_S_VLAN:
930 case ICE_FLOW_FIELD_IDX_C_VLAN:
931 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
933 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
934 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
936 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
937 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
939 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
940 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
942 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
943 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
944 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
946 /* TTL and PROT share the same extraction seq. entry.
947 * Each is considered a sibling to the other in terms of sharing
948 * the same extraction sequence entry.
950 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
951 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
952 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
953 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
955 /* If the sibling field is also included, that field's
956 * mask needs to be included.
958 if (match & BIT(sib))
959 sib_mask = ice_flds_info[sib].mask;
961 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
962 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
963 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
965 /* TTL and PROT share the same extraction seq. entry.
966 * Each is considered a sibling to the other in terms of sharing
967 * the same extraction sequence entry.
969 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
970 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
971 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
972 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
974 /* If the sibling field is also included, that field's
975 * mask needs to be included.
977 if (match & BIT(sib))
978 sib_mask = ice_flds_info[sib].mask;
980 case ICE_FLOW_FIELD_IDX_IPV4_SA:
981 case ICE_FLOW_FIELD_IDX_IPV4_DA:
982 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
984 case ICE_FLOW_FIELD_IDX_IPV6_SA:
985 case ICE_FLOW_FIELD_IDX_IPV6_DA:
986 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
987 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
988 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
989 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
990 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
991 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
992 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
994 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
995 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
996 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
997 prot_id = ICE_PROT_TCP_IL;
999 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1000 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1001 prot_id = ICE_PROT_UDP_IL_OR_S;
1003 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1004 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1005 prot_id = ICE_PROT_SCTP_IL;
1007 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1008 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1009 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1010 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1011 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1012 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1013 /* GTP is accessed through UDP OF protocol */
1014 prot_id = ICE_PROT_UDP_OF;
1016 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1017 prot_id = ICE_PROT_PPPOE;
1019 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1020 prot_id = ICE_PROT_UDP_IL_OR_S;
1022 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1023 prot_id = ICE_PROT_L2TPV3;
1025 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1026 prot_id = ICE_PROT_ESP_F;
1028 case ICE_FLOW_FIELD_IDX_AH_SPI:
1029 prot_id = ICE_PROT_ESP_2;
1031 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1032 prot_id = ICE_PROT_UDP_IL_OR_S;
1034 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1035 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1036 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1037 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1038 case ICE_FLOW_FIELD_IDX_ARP_OP:
1039 prot_id = ICE_PROT_ARP_OF;
1041 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1042 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1043 /* ICMP type and code share the same extraction seq. entry */
1044 prot_id = (params->prof->segs[seg].hdrs &
1045 ICE_FLOW_SEG_HDR_IPV4) ?
1046 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1047 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1048 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1049 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1051 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1052 prot_id = ICE_PROT_GRE_OF;
1055 return ICE_ERR_NOT_IMPL;
1058 /* Each extraction sequence entry is a word in size, and extracts a
1059 * word-aligned offset from a protocol header.
1061 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record, per field: word-aligned extraction offset, bit displacement
 * within the extracted word, starting entry index, and match mask. */
1063 flds[fld].xtrct.prot_id = prot_id;
1064 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1065 ICE_FLOW_FV_EXTRACT_SZ;
1066 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1067 flds[fld].xtrct.idx = params->es_cnt;
1068 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1070 /* Adjust the next field-entry index after accommodating the number of
1071 * entries this field consumes
1073 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1074 ice_flds_info[fld].size, ese_bits);
1076 /* Fill in the extraction sequence entries needed for this field */
1077 off = flds[fld].xtrct.off;
1078 mask = flds[fld].xtrct.mask;
1079 for (i = 0; i < cnt; i++) {
1080 /* Only consume an extraction sequence entry if there is no
1081 * sibling field associated with this field or the sibling entry
1082 * already extracts the word shared with this field.
1084 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1085 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1086 flds[sib].xtrct.off != off) {
1089 /* Make sure the number of extraction sequence required
1090 * does not exceed the block's capability
1092 if (params->es_cnt >= fv_words)
1093 return ICE_ERR_MAX_LIMIT;
1095 /* some blocks require a reversed field vector layout */
1096 if (hw->blk[params->blk].es.reverse)
1097 idx = fv_words - params->es_cnt - 1;
1099 idx = params->es_cnt;
1101 params->es[idx].prot_id = prot_id;
1102 params->es[idx].off = off;
/* Sibling's mask is OR'd in so one shared entry matches both fields. */
1103 params->mask[idx] = mask | sib_mask;
1107 off += ICE_FLOW_FV_EXTRACT_SZ;
1114 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1115 * @hw: pointer to the HW struct
1116 * @params: information about the flow to be processed
1117 * @seg: index of packet segment whose raw fields are to be be extracted
1119 static enum ice_status
1120 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* NOTE(review): gapped listing -- the 'u8 seg' parameter line, local
 * declarations and the trailing es_cnt increment / return are not visible
 * here; confirm against the full file before editing.
 */
1127 if (!params->prof->segs[seg].raws_cnt)
1130 if (params->prof->segs[seg].raws_cnt >
1131 ARRAY_SIZE(params->prof->segs[seg].raws))
1132 return ICE_ERR_MAX_LIMIT;
1134 /* Offsets within the segment headers are not supported */
1135 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1137 return ICE_ERR_PARAM;
1139 fv_words = hw->blk[params->blk].es.fvw;
1141 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1142 struct ice_flow_seg_fld_raw *raw;
/* NOTE(review): '¶ms' below looks like HTML-entity mojibake of
 * '&params' ('&para' -> U+00B6); fix the character corruption in the
 * real file. */
1145 raw = ¶ms->prof->segs[seg].raws[i];
1147 /* Storing extraction information */
1148 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1149 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1150 ICE_FLOW_FV_EXTRACT_SZ;
1151 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1153 raw->info.xtrct.idx = params->es_cnt;
1155 /* Determine the number of field vector entries this raw field
1158 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1159 (raw->info.src.last * BITS_PER_BYTE),
1160 (ICE_FLOW_FV_EXTRACT_SZ *
1162 off = raw->info.xtrct.off;
1163 for (j = 0; j < cnt; j++) {
1166 /* Make sure the number of extraction sequence required
1167 * does not exceed the block's capability
1169 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1170 params->es_cnt >= ICE_MAX_FV_WORDS)
1171 return ICE_ERR_MAX_LIMIT;
1173 /* some blocks require a reversed field vector layout */
1174 if (hw->blk[params->blk].es.reverse)
1175 idx = fv_words - params->es_cnt - 1;
1177 idx = params->es_cnt;
1179 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1180 params->es[idx].off = off;
1182 off += ICE_FLOW_FV_EXTRACT_SZ;
1190 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1191 * @hw: pointer to the HW struct
1192 * @params: information about the flow to be processed
1194 * This function iterates through all matched fields in the given segments, and
1195 * creates an extraction sequence for the fields.
1197 static enum ice_status
1198 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1199 struct ice_flow_prof_params *params)
/* NOTE(review): gapped listing -- per-field 'match &= ~bit' style clearing,
 * early-exit status checks and the final return are not visible here.
 */
1201 enum ice_status status = ICE_SUCCESS;
1204 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1207 if (params->blk == ICE_BLK_ACL) {
1208 status = ice_flow_xtract_pkt_flags(hw, params,
1209 ICE_RX_MDID_PKT_FLAGS_15_0);
/* Walk every segment and extract each field whose bit is set in 'match'. */
1214 for (i = 0; i < params->prof->segs_cnt; i++) {
1215 u64 match = params->prof->segs[i].match;
1216 enum ice_flow_field j;
1218 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1219 const u64 bit = BIT_ULL(j);
1222 status = ice_flow_xtract_fld(hw, params, i, j,
1230 /* Process raw matching bytes */
1231 status = ice_flow_xtract_raws(hw, params, i);
1240 * ice_flow_sel_acl_scen - returns the specific scenario
1241 * @hw: pointer to the hardware structure
1242 * @params: information about the flow to be processed
1244 * This function will return the specific scenario based on the
1245 * params passed to it
1247 static enum ice_status
1248 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1250 /* Find the best-fit scenario for the provided match width */
1251 struct ice_acl_scen *cand_scen = NULL, *scen;
/* Presumably guarded by '!hw->acl_tbl' -- the condition line is missing
 * from this gapped listing; verify against the full file. */
1254 return ICE_ERR_DOES_NOT_EXIST;
1256 /* Loop through each scenario and match against the scenario width
1257 * to select the specific scenario
1259 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
/* Best fit: the narrowest scenario still wide enough for entry_length. */
1260 if (scen->eff_width >= params->entry_length &&
1261 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1264 return ICE_ERR_DOES_NOT_EXIST;
1266 params->prof->cfg.scen = cand_scen;
1272 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1273 * @params: information about the flow to be processed
1275 static enum ice_status
1276 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
/* NOTE(review): gapped listing -- loop-variable declarations, closing
 * braces and the final ICE_SUCCESS return are not visible here.
 */
1278 u16 index, i, range_idx = 0;
1280 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1282 for (i = 0; i < params->prof->segs_cnt; i++) {
/* NOTE(review): '¶ms' below is mojibake for '&params' -- fix the
 * character corruption in the real file. */
1283 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1284 u64 match = seg->match;
1287 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1288 struct ice_flow_fld_info *fld;
1289 const u64 bit = BIT_ULL(j);
1294 fld = &seg->fields[j];
1295 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1297 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1298 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1300 /* Range checking only supported for single
1303 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1305 BITS_PER_BYTE * 2) > 1)
1306 return ICE_ERR_PARAM;
1308 /* Ranges must define low and high values */
1309 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1310 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1311 return ICE_ERR_PARAM;
/* Range fields consume a range-checker slot, not entry bytes. */
1313 fld->entry.val = range_idx++;
1315 /* Store adjusted byte-length of field for later
1316 * use, taking into account potential
1317 * non-byte-aligned displacement
1319 fld->entry.last = DIVIDE_AND_ROUND_UP
1320 (ice_flds_info[j].size +
1321 (fld->xtrct.disp % BITS_PER_BYTE),
1323 fld->entry.val = index;
1324 index += fld->entry.last;
1330 for (j = 0; j < seg->raws_cnt; j++) {
1331 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1333 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1334 raw->info.entry.val = index;
1335 raw->info.entry.last = raw->info.src.last;
1336 index += raw->info.entry.last;
1340 /* Currently only support using the byte selection base, which only
1341 * allows for an effective entry size of 30 bytes. Reject anything
1344 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1345 return ICE_ERR_PARAM;
1347 /* Only 8 range checkers per profile, reject anything trying to use
1350 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1351 return ICE_ERR_PARAM;
1353 /* Store # bytes required for entry for later use */
1354 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1360 * ice_flow_proc_segs - process all packet segments associated with a profile
1361 * @hw: pointer to the HW struct
1362 * @params: information about the flow to be processed
1364 static enum ice_status
1365 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
/* Pipeline: resolve segment headers -> build extraction sequence ->
 * block-specific post-processing (ACL entry layout + scenario select).
 * NOTE(review): gapped listing -- the switch's case labels
 * (e.g. ICE_BLK_FD/RSS vs ICE_BLK_ACL) and error-check branches are not
 * visible here.
 */
1367 enum ice_status status;
1369 status = ice_flow_proc_seg_hdrs(params);
1373 status = ice_flow_create_xtrct_seq(hw, params);
1377 switch (params->blk) {
1380 status = ICE_SUCCESS;
1383 status = ice_flow_acl_def_entry_frmt(params);
1386 status = ice_flow_sel_acl_scen(hw, params);
1391 return ICE_ERR_NOT_IMPL;
1397 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1398 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1399 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1402 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1403 * @hw: pointer to the HW struct
1404 * @blk: classification stage
1405 * @dir: flow direction
1406 * @segs: array of one or more packet segments that describe the flow
1407 * @segs_cnt: number of packet segments provided
1408 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1409 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1411 static struct ice_flow_prof *
1412 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1413 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1414 u8 segs_cnt, u16 vsi_handle, u32 conds)
/* Returns the first profile whose direction, segment count, headers (and
 * optionally matched fields / VSI association) agree with the arguments,
 * or NULL. The profile-list lock is held for the duration of the scan.
 * NOTE(review): gapped listing -- 'continue'/'break' statements and the
 * 'prof = p' assignment are not visible here.
 */
1416 struct ice_flow_prof *p, *prof = NULL;
1418 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1419 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1420 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1421 segs_cnt && segs_cnt == p->segs_cnt) {
1424 /* Check for profile-VSI association if specified */
1425 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1426 ice_is_vsi_valid(hw, vsi_handle) &&
1427 !ice_is_bit_set(p->vsis, vsi_handle))
1430 /* Protocol headers must be checked. Matched fields are
1431 * checked if specified.
1433 for (i = 0; i < segs_cnt; i++)
1434 if (segs[i].hdrs != p->segs[i].hdrs ||
1435 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1436 segs[i].match != p->segs[i].match))
1439 /* A match is found if all segments are matched */
1440 if (i == segs_cnt) {
1445 ice_release_lock(&hw->fl_profs_locks[blk]);
1451 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1452 * @hw: pointer to the HW struct
1453 * @blk: classification stage
1454 * @dir: flow direction
1455 * @segs: array of one or more packet segments that describe the flow
1456 * @segs_cnt: number of packet segments provided
1459 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1460 struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Thin wrapper: delegates to ice_flow_find_prof_conds() with field
 * checking enabled and no VSI filter, mapping "not found" to
 * ICE_FLOW_PROF_ID_INVAL. */
1462 struct ice_flow_prof *p;
1464 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1465 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1467 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1471 * ice_flow_find_prof_id - Look up a profile with given profile ID
1472 * @hw: pointer to the HW struct
1473 * @blk: classification stage
1474 * @prof_id: unique ID to identify this flow profile
1476 static struct ice_flow_prof *
1477 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
/* Linear scan of the block's profile list by ID; NULL return path is in
 * lines not visible in this gapped listing. No locking here -- callers
 * presumably hold fl_profs_locks[blk]; confirm against call sites. */
1479 struct ice_flow_prof *p;
1481 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1482 if (p->id == prof_id)
1489 * ice_dealloc_flow_entry - Deallocate flow entry memory
1490 * @hw: pointer to the HW struct
1491 * @entry: flow entry to be removed
1494 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
/* Frees every allocation hanging off the entry (entry buffer, range
 * buffer, actions) and then the entry itself. NOTE(review): the guards
 * around these frees (e.g. 'if (!entry) return;') fall in lines not
 * visible in this gapped listing. */
1500 ice_free(hw, entry->entry);
1502 if (entry->range_buf) {
1503 ice_free(hw, entry->range_buf);
1504 entry->range_buf = NULL;
1508 ice_free(hw, entry->acts);
1510 entry->acts_cnt = 0;
1513 ice_free(hw, entry);
1516 #define ICE_ACL_INVALID_SCEN 0x3f
1519 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1520 * @hw: pointer to the hardware structure
1521 * @prof: pointer to flow profile
1522 * @buf: destination buffer function writes partial extraction sequence to
1524 * returns ICE_SUCCESS if no PF is associated to the given profile
1525 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1526 * returns other error code for real error
1528 static enum ice_status
1529 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1530 struct ice_aqc_acl_prof_generic_frmt *buf)
1532 enum ice_status status;
1535 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1539 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1543 /* If all PF's associated scenarios are all 0 or all
1544 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1545 * not been configured yet.
1547 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1548 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1549 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1550 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1553 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1554 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1555 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1556 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1557 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1558 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1559 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1560 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* Any other mix of scenario numbers means at least one PF still uses it. */
1563 return ICE_ERR_IN_USE;
1567 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1568 * @hw: pointer to the hardware structure
1569 * @acts: array of actions to be performed on a match
1570 * @acts_cnt: number of actions
1572 static enum ice_status
1573 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
/* Deallocates the firmware ACL counters referenced by any counter-type
 * actions in the array. NOTE(review): gapped listing -- the 'u8 acts_cnt'
 * parameter line and the first/last counter field assignments fed by the
 * LE16_TO_CPU conversions below are partially missing; verify against the
 * full file. */
1578 for (i = 0; i < acts_cnt; i++) {
1579 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1580 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1581 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1582 struct ice_acl_cntrs cntrs;
1583 enum ice_status status;
1585 cntrs.bank = 0; /* Only bank0 for the moment */
1587 LE16_TO_CPU(acts[i].data.acl_act.value);
1589 LE16_TO_CPU(acts[i].data.acl_act.value);
1591 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1592 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1594 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1596 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1605 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1606 * @hw: pointer to the hardware structure
1607 * @prof: pointer to flow profile
1609 * Disassociate the scenario from the profile for the PF of the VSI.
1611 static enum ice_status
1612 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
/* Read-modify-write of the profile's generic format: query the current
 * per-PF scenario table, mark this PF's slot invalid, program it back. */
1614 struct ice_aqc_acl_prof_generic_frmt buf;
1615 enum ice_status status = ICE_SUCCESS;
1618 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1620 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1624 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1628 /* Clear scenario for this PF */
1629 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1630 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1636 * ice_flow_rem_entry_sync - Remove a flow entry
1637 * @hw: pointer to the HW struct
1638 * @blk: classification stage
1639 * @entry: flow entry to be removed
1641 static enum ice_status
1642 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1643 struct ice_flow_entry *entry)
/* For ACL entries: remove the scenario entry from HW and release any ACL
 * counters, then unlink the entry from its list and free all its memory.
 * NOTE(review): gapped listing -- the '!entry' guard condition and the
 * status checks after ice_acl_rem_entry() are partially missing here. */
1646 return ICE_ERR_BAD_PTR;
1648 if (blk == ICE_BLK_ACL) {
1649 enum ice_status status;
1652 return ICE_ERR_BAD_PTR;
1654 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1655 entry->scen_entry_idx);
1659 /* Checks if we need to release an ACL counter. */
1660 if (entry->acts_cnt && entry->acts)
1661 ice_flow_acl_free_act_cntr(hw, entry->acts,
1665 LIST_DEL(&entry->l_entry);
1667 ice_dealloc_flow_entry(hw, entry);
1673 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1674 * @hw: pointer to the HW struct
1675 * @blk: classification stage
1676 * @dir: flow direction
1677 * @prof_id: unique ID to identify this flow profile
1678 * @segs: array of one or more packet segments that describe the flow
1679 * @segs_cnt: number of packet segments provided
1680 * @acts: array of default actions
1681 * @acts_cnt: number of default actions
1682 * @prof: stores the returned flow profile added
1684 * Assumption: the caller has acquired the lock to the profile list
1686 static enum ice_status
1687 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1688 enum ice_flow_dir dir, u64 prof_id,
1689 struct ice_flow_seg_info *segs, u8 segs_cnt,
1690 struct ice_flow_action *acts, u8 acts_cnt,
1691 struct ice_flow_prof **prof)
/* NOTE(review): several lines below contain '¶ms' -- HTML-entity
 * mojibake of '&params' ('&para' -> U+00B6). Fix the character corruption
 * in the real file. The listing is also gapped (error-label cleanup path
 * and final return are only partially visible). */
1693 struct ice_flow_prof_params params;
1694 enum ice_status status;
1697 if (!prof || (acts_cnt && !acts))
1698 return ICE_ERR_BAD_PTR;
1700 ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM);
1701 params.prof = (struct ice_flow_prof *)
1702 ice_malloc(hw, sizeof(*params.prof));
1704 return ICE_ERR_NO_MEMORY;
1706 /* initialize extraction sequence to all invalid (0xff) */
1707 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1708 params.es[i].prot_id = ICE_PROT_INVALID;
1709 params.es[i].off = ICE_FV_OFFSET_INVAL;
1713 params.prof->id = prof_id;
1714 params.prof->dir = dir;
1715 params.prof->segs_cnt = segs_cnt;
1717 /* Make a copy of the segments that need to be persistent in the flow
1720 for (i = 0; i < segs_cnt; i++)
1721 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1722 ICE_NONDMA_TO_NONDMA)
1724 /* Make a copy of the actions that need to be persistent in the flow
1728 params.prof->acts = (struct ice_flow_action *)
1729 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1730 ICE_NONDMA_TO_NONDMA);
1732 if (!params.prof->acts) {
1733 status = ICE_ERR_NO_MEMORY;
1738 status = ice_flow_proc_segs(hw, ¶ms);
1740 ice_debug(hw, ICE_DBG_FLOW,
1741 "Error processing a flow's packet segments\n");
1745 /* Add a HW profile for this flow profile */
1746 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1747 params.attr, params.attr_cnt, params.es,
1750 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1754 INIT_LIST_HEAD(¶ms.prof->entries);
1755 ice_init_lock(¶ms.prof->entries_lock);
1756 *prof = params.prof;
/* Error path: release the partially-built profile and its action copy. */
1760 if (params.prof->acts)
1761 ice_free(hw, params.prof->acts);
1762 ice_free(hw, params.prof);
1769 * ice_flow_rem_prof_sync - remove a flow profile
1770 * @hw: pointer to the hardware structure
1771 * @blk: classification stage
1772 * @prof: pointer to flow profile to remove
1774 * Assumption: the caller has acquired the lock to the profile list
1776 static enum ice_status
1777 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1778 struct ice_flow_prof *prof)
/* Removal order: flush all flow entries, detach ACL state (scenario and,
 * if this PF was the last user, the range checkers), remove HW profiles,
 * then unlink and free the software profile object. */
1780 enum ice_status status;
1782 /* Remove all remaining flow entries before removing the flow profile */
1783 if (!LIST_EMPTY(&prof->entries)) {
1784 struct ice_flow_entry *e, *t;
1786 ice_acquire_lock(&prof->entries_lock);
1788 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1790 status = ice_flow_rem_entry_sync(hw, blk, e);
1795 ice_release_lock(&prof->entries_lock);
1798 if (blk == ICE_BLK_ACL) {
1799 struct ice_aqc_acl_profile_ranges query_rng_buf;
1800 struct ice_aqc_acl_prof_generic_frmt buf;
1803 /* Disassociate the scenario from the profile for the PF */
1804 status = ice_flow_acl_disassoc_scen(hw, prof);
1808 /* Clear the range-checker if the profile ID is no longer
1811 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1812 if (status && status != ICE_ERR_IN_USE) {
1814 } else if (!status) {
1815 /* Clear the range-checker value for profile ID */
1816 ice_memset(&query_rng_buf, 0,
1817 sizeof(struct ice_aqc_acl_profile_ranges),
1820 status = ice_flow_get_hw_prof(hw, blk, prof->id,
1825 status = ice_prog_acl_prof_ranges(hw, prof_id,
1826 &query_rng_buf, NULL);
1832 /* Remove all hardware profiles associated with this flow profile */
1833 status = ice_rem_prof(hw, blk, prof->id);
1835 LIST_DEL(&prof->l_entry);
1836 ice_destroy_lock(&prof->entries_lock);
1838 ice_free(hw, prof->acts);
1846 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1847 * @buf: Destination buffer function writes partial xtrct sequence to
1848 * @info: Info about field
1851 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1852 struct ice_flow_fld_info *info)
/* Maps the field's byte span in the field-vector extraction output onto
 * the profile's byte-selection table, one byte per entry slot. */
1857 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1858 info->xtrct.disp / BITS_PER_BYTE;
1859 dst = info->entry.val;
1860 for (i = 0; i < info->entry.last; i++)
1861 /* HW stores field vector words in LE, convert words back to BE
1862 * so constructed entries will end up in network order
1864 buf->byte_selection[dst++] = src++ ^ 1;
1868 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1869 * @hw: pointer to the hardware structure
1870 * @prof: pointer to flow profile
1872 static enum ice_status
1873 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
/* Programs the per-profile extraction layout (byte/word selections) the
 * first time any PF configures the profile, then records this PF's
 * scenario number. NOTE(review): gapped listing -- the branch structure
 * around the ICE_ERR_IN_USE fast path and per-field match-bit clearing
 * is only partially visible here. */
1875 struct ice_aqc_acl_prof_generic_frmt buf;
1876 struct ice_flow_fld_info *info;
1877 enum ice_status status;
1881 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1883 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1887 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1888 if (status && status != ICE_ERR_IN_USE)
1892 /* Program the profile dependent configuration. This is done
1893 * only once regardless of the number of PFs using that profile
1895 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1897 for (i = 0; i < prof->segs_cnt; i++) {
1898 struct ice_flow_seg_info *seg = &prof->segs[i];
1899 u64 match = seg->match;
1902 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1903 const u64 bit = BIT_ULL(j);
1908 info = &seg->fields[j];
/* Range-checked fields use word selection; all others get the
 * per-byte selection layout. */
1910 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1911 buf.word_selection[info->entry.val] =
1914 ice_flow_acl_set_xtrct_seq_fld(&buf,
1920 for (j = 0; j < seg->raws_cnt; j++) {
1921 info = &seg->raws[j].info;
1922 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
1926 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1927 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1931 /* Update the current PF */
1932 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1933 status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1939 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1940 * @hw: pointer to the hardware structure
1941 * @blk: classification stage
1942 * @vsi_handle: software VSI handle
1943 * @vsig: target VSI group
1945 * Assumption: the caller has already verified that the VSI to
1946 * be added has the same characteristics as the VSIG and will
1947 * thereby have access to all resources added to that VSIG.
1950 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
/* NOTE(review): the 'u16 vsig' parameter line is not visible in this
 * gapped listing; it is passed through to ice_add_vsi_flow() below. */
1953 enum ice_status status;
1955 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1956 return ICE_ERR_PARAM;
/* Translate the software handle to the HW VSI number under the
 * profile-list lock before delegating to the block-level helper. */
1958 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1959 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1961 ice_release_lock(&hw->fl_profs_locks[blk]);
1967 * ice_flow_assoc_prof - associate a VSI with a flow profile
1968 * @hw: pointer to the hardware structure
1969 * @blk: classification stage
1970 * @prof: pointer to flow profile
1971 * @vsi_handle: software VSI handle
1973 * Assumption: the caller has acquired the lock to the profile list
1974 * and the software VSI handle has been validated
1976 static enum ice_status
1977 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1978 struct ice_flow_prof *prof, u16 vsi_handle)
/* Idempotent: a VSI already present in prof->vsis is left untouched.
 * For ACL, the extraction sequence must be programmed before the
 * profile-ID flow is added. On success the VSI bit is set in the
 * profile's bitmap. */
1980 enum ice_status status = ICE_SUCCESS;
1982 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1983 if (blk == ICE_BLK_ACL) {
1984 status = ice_flow_acl_set_xtrct_seq(hw, prof);
1988 status = ice_add_prof_id_flow(hw, blk,
1989 ice_get_hw_vsi_num(hw,
1993 ice_set_bit(vsi_handle, prof->vsis);
1995 ice_debug(hw, ICE_DBG_FLOW,
1996 "HW profile add failed, %d\n",
2004 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2005 * @hw: pointer to the hardware structure
2006 * @blk: classification stage
2007 * @prof: pointer to flow profile
2008 * @vsi_handle: software VSI handle
2010 * Assumption: the caller has acquired the lock to the profile list
2011 * and the software VSI handle has been validated
2013 static enum ice_status
2014 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2015 struct ice_flow_prof *prof, u16 vsi_handle)
2017 enum ice_status status = ICE_SUCCESS;
/* Only touch HW when the VSI is actually associated with the profile */
2019 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2020 status = ice_rem_prof_id_flow(hw, blk,
2021 ice_get_hw_vsi_num(hw,
/* Mirror the successful HW removal in the SW bitmap */
2025 ice_clear_bit(vsi_handle, prof->vsis);
2027 ice_debug(hw, ICE_DBG_FLOW,
2028 "HW profile remove failed, %d\n",
2036 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2037 * @hw: pointer to the HW struct
2038 * @blk: classification stage
2039 * @dir: flow direction
2040 * @prof_id: unique ID to identify this flow profile
2041 * @segs: array of one or more packet segments that describe the flow
2042 * @segs_cnt: number of packet segments provided
2043 * @acts: array of default actions
2044 * @acts_cnt: number of default actions
2045 * @prof: stores the returned flow profile added
2048 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2049 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2050 struct ice_flow_action *acts, u8 acts_cnt,
2051 struct ice_flow_prof **prof)
2053 enum ice_status status;
/* Validate segment count and pointers before doing any work */
2055 if (segs_cnt > ICE_FLOW_SEG_MAX)
2056 return ICE_ERR_MAX_LIMIT;
2059 return ICE_ERR_PARAM;
2062 return ICE_ERR_BAD_PTR;
/* Validate that the segments' protocol headers form a legal stack */
2064 status = ice_flow_val_hdrs(segs, segs_cnt);
2068 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2070 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2071 acts, acts_cnt, prof);
/* Publish the new profile on the per-block profile list */
2073 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2075 ice_release_lock(&hw->fl_profs_locks[blk]);
2081 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2082 * @hw: pointer to the HW struct
2083 * @blk: the block for which the flow profile is to be removed
2084 * @prof_id: unique ID of the flow profile to be removed
2087 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2089 struct ice_flow_prof *prof;
2090 enum ice_status status;
2092 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2094 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2096 status = ICE_ERR_DOES_NOT_EXIST;
2100 /* prof becomes invalid after the call */
2101 status = ice_flow_rem_prof_sync(hw, blk, prof);
2104 ice_release_lock(&hw->fl_profs_locks[blk]);
2110 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2111 * @hw: pointer to the HW struct
2112 * @blk: classification stage
2113 * @prof_id: the profile ID handle
2114 * @hw_prof_id: pointer to variable to receive the HW profile ID
2117 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2120 struct ice_prof_map *map;
/* Translate the SW profile ID handle to its HW profile number */
2122 map = ice_search_prof_id(hw, blk, prof_id)
2124 *hw_prof_id = map->prof_id;
2128 return ICE_ERR_DOES_NOT_EXIST;
2132 * ice_flow_find_entry - look for a flow entry using its unique ID
2133 * @hw: pointer to the HW struct
2134 * @blk: classification stage
2135 * @entry_id: unique ID to identify this flow entry
2137 * This function looks for the flow entry with the specified unique ID in all
2138 * flow profiles of the specified classification stage. If the entry is found,
2139 * it returns the handle to the flow entry. Otherwise, it returns
2140 * ICE_FLOW_ENTRY_ID_INVAL.
2142 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2144 struct ice_flow_entry *found = NULL;
2145 struct ice_flow_prof *p;
2147 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Scan every profile of this block; each profile's entry list is
 * guarded by its own entries_lock.
 */
2149 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2150 struct ice_flow_entry *e;
2152 ice_acquire_lock(&p->entries_lock);
2153 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2154 if (e->id == entry_id) {
2158 ice_release_lock(&p->entries_lock);
2164 ice_release_lock(&hw->fl_profs_locks[blk]);
2166 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2170 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2171 * @hw: pointer to the hardware structure
2172 * @acts: array of actions to be performed on a match
2173 * @acts_cnt: number of actions
2174 * @cnt_alloc: indicates if an ACL counter has been allocated.
2176 static enum ice_status
2177 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2178 u8 acts_cnt, bool *cnt_alloc)
2180 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2183 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2186 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2187 return ICE_ERR_OUT_OF_RANGE;
/* First pass: only a small set of action types is legal for ACL rules,
 * and no action type may appear twice (tracked via dup_check).
 */
2189 for (i = 0; i < acts_cnt; i++) {
2190 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2191 acts[i].type != ICE_FLOW_ACT_DROP &&
2192 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2193 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2196 /* If the caller wants to add two actions of the same type, then
2197 * it is considered invalid configuration.
2199 if (ice_test_and_set_bit(acts[i].type, dup_check))
2200 return ICE_ERR_PARAM;
2203 /* Checks if ACL counters are needed. */
2204 for (i = 0; i < acts_cnt; i++) {
2205 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2206 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2207 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2208 struct ice_acl_cntrs cntrs;
2209 enum ice_status status;
2212 cntrs.bank = 0; /* Only bank0 for the moment */
/* A dual counter tracks packets and bytes together; otherwise
 * a single counter suffices.
 */
2214 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2215 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2217 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2219 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2222 /* Counter index within the bank */
2223 acts[i].data.acl_act.value =
2224 CPU_TO_LE16(cntrs.first_cntr);
2233 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2234 * @fld: number of the given field
2235 * @info: info about field
2236 * @range_buf: range checker configuration buffer
2237 * @data: pointer to a data buffer containing flow entry's match values/masks
2238 * @range: Input/output param indicating which range checkers are being used
2241 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2242 struct ice_aqc_acl_profile_ranges *range_buf,
2243 u8 *data, u8 *range)
2247 /* If not specified, default mask is all bits in field */
2248 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2249 BIT(ice_flds_info[fld].size) - 1 :
2250 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2252 /* If the mask is 0, then we don't need to worry about this input
2253 * range checker value.
/* Low/high boundaries come from the entry's 'val' and 'last' source
 * locations, shifted by the extraction displacement so they line up
 * with the HW view of the field.
 */
2257 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2259 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2260 u8 range_idx = info->entry.val;
2262 range_buf->checker_cfg[range_idx].low_boundary =
2263 CPU_TO_BE16(new_low);
2264 range_buf->checker_cfg[range_idx].high_boundary =
2265 CPU_TO_BE16(new_high);
2266 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2268 /* Indicate which range checker is being used */
2269 *range |= BIT(range_idx);
2274 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2275 * @fld: number of the given field
2276 * @info: info about the field
2277 * @buf: buffer containing the entry
2278 * @dontcare: buffer containing don't care mask for entry
2279 * @data: pointer to a data buffer containing flow entry's match values/masks
2282 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2283 u8 *dontcare, u8 *data)
2285 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2286 bool use_mask = false;
2289 src = info->src.val;
2290 mask = info->src.mask;
2291 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2292 disp = info->xtrct.disp % BITS_PER_BYTE;
2294 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying the bits shifted out by the
 * displacement from one byte into the next via tmp_s/tmp_m.
 */
2297 for (k = 0; k < info->entry.last; k++, dst++) {
2298 /* Add overflow bits from previous byte */
2299 buf[dst] = (tmp_s & 0xff00) >> 8;
2301 /* If mask is not valid, tmp_m is always zero, so just setting
2302 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2303 * overflow bits of mask from prev byte
2305 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2307 /* If there is displacement, last byte will only contain
2308 * displaced data, but there is no more data to read from user
2309 * buffer, so skip so as not to potentially read beyond end of
2312 if (!disp || k < info->entry.last - 1) {
2313 /* Store shifted data to use in next byte */
2314 tmp_s = data[src++] << disp;
2316 /* Add current (shifted) byte */
2317 buf[dst] |= tmp_s & 0xff;
2319 /* Handle mask if valid */
2321 tmp_m = (~data[mask++] & 0xff) << disp;
2322 dontcare[dst] |= tmp_m & 0xff;
2327 /* Fill in don't care bits at beginning of field */
2329 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2330 for (k = 0; k < disp; k++)
2331 dontcare[dst] |= BIT(k);
/* Trailing displacement within the last byte of the field */
2334 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2336 /* Fill in don't care bits at end of field */
2338 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2339 info->entry.last - 1;
2340 for (k = end_disp; k < BITS_PER_BYTE; k++)
2341 dontcare[dst] |= BIT(k);
2346 * ice_flow_acl_frmt_entry - Format ACL entry
2347 * @hw: pointer to the hardware structure
2348 * @prof: pointer to flow profile
2349 * @e: pointer to the flow entry
2350 * @data: pointer to a data buffer containing flow entry's match values/masks
2351 * @acts: array of actions to be performed on a match
2352 * @acts_cnt: number of actions
2354 * Formats the key (and key_inverse) to be matched from the data passed in,
2355 * along with data from the flow profile. This key/key_inverse pair makes up
2356 * the 'entry' for an ACL flow entry.
2358 static enum ice_status
2359 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2360 struct ice_flow_entry *e, u8 *data,
2361 struct ice_flow_action *acts, u8 acts_cnt)
2363 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2364 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2365 enum ice_status status;
2370 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2374 /* Format the result action */
2376 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2380 status = ICE_ERR_NO_MEMORY;
/* Keep a private copy of the caller's action array on the entry */
2382 e->acts = (struct ice_flow_action *)
2383 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2384 ICE_NONDMA_TO_NONDMA);
2389 e->acts_cnt = acts_cnt;
2391 /* Format the matching data */
2392 buf_sz = prof->cfg.scen->width;
2393 buf = (u8 *)ice_malloc(hw, buf_sz);
2397 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2401 /* 'key' buffer will store both key and key_inverse, so must be twice
2404 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2408 range_buf = (struct ice_aqc_acl_profile_ranges *)
2409 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2413 /* Set don't care mask to all 1's to start, will zero out used bytes */
2414 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2416 for (i = 0; i < prof->segs_cnt; i++) {
2417 struct ice_flow_seg_info *seg = &prof->segs[i];
2418 u64 match = seg->match;
/* Walk every matched field of the segment; range-type fields program
 * a range checker, all others are packed directly into the key.
 */
2421 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2422 struct ice_flow_fld_info *info;
2423 const u64 bit = BIT_ULL(j);
2428 info = &seg->fields[j];
2430 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2431 ice_flow_acl_frmt_entry_range(j, info,
2435 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) matches are copied verbatim into the key */
2441 for (j = 0; j < seg->raws_cnt; j++) {
2442 struct ice_flow_fld_info *info = &seg->raws[j].info;
2443 u16 dst, src, mask, k;
2444 bool use_mask = false;
2446 src = info->src.val;
2447 dst = info->entry.val -
2448 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2449 mask = info->src.mask;
2451 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2454 for (k = 0; k < info->entry.last; k++, dst++) {
2455 buf[dst] = data[src++];
2457 dontcare[dst] = ~data[mask++];
/* The profile ID byte always participates in the match */
2464 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2465 dontcare[prof->cfg.scen->pid_idx] = 0;
2467 /* Format the buffer for direction flags */
2468 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2470 if (prof->dir == ICE_FLOW_RX)
2471 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2474 buf[prof->cfg.scen->rng_chk_idx] = range;
2475 /* Mark any unused range checkers as don't care */
2476 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2477 e->range_buf = range_buf;
2479 ice_free(hw, range_buf);
2482 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2488 e->entry_sz = buf_sz * 2;
2495 ice_free(hw, dontcare);
/* On failure, release everything that may have been attached to 'e' */
2500 if (status && range_buf) {
2501 ice_free(hw, range_buf);
2502 e->range_buf = NULL;
2505 if (status && e->acts) {
2506 ice_free(hw, e->acts);
2511 if (status && cnt_alloc)
2512 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2518 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2519 * the compared data.
2520 * @prof: pointer to flow profile
2521 * @e: pointer to the comparing flow entry
2522 * @do_chg_action: decide if we want to change the ACL action
2523 * @do_add_entry: decide if we want to add the new ACL entry
2524 * @do_rem_entry: decide if we want to remove the current ACL entry
2526 * Find an ACL scenario entry that matches the compared data. At the same time,
2527 * this function also figures out:
2528 * a/ If we want to change the ACL action
2529 * b/ If we want to add the new ACL entry
2530 * c/ If we want to remove the current ACL entry
2532 static struct ice_flow_entry *
2533 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2534 struct ice_flow_entry *e, bool *do_chg_action,
2535 bool *do_add_entry, bool *do_rem_entry)
2537 struct ice_flow_entry *p, *return_entry = NULL;
2541 * a/ There exists an entry with same matching data, but different
2542 * priority, then we remove this existing ACL entry. Then, we
2543 * will add the new entry to the ACL scenario.
2544 * b/ There exists an entry with same matching data, priority, and
2545 * result action, then we do nothing
2546 * c/ There exists an entry with same matching data, priority, but
2547 * different action, then we only change the entry's action.
2548 * d/ Else, we add this new entry to the ACL scenario.
2550 *do_chg_action = false;
2551 *do_add_entry = true;
2552 *do_rem_entry = false;
2553 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2554 if (memcmp(p->entry, e->entry, p->entry_sz))
2557 /* From this point, we have the same matching_data. */
2558 *do_add_entry = false;
2561 if (p->priority != e->priority) {
2562 /* matching data && !priority */
2563 *do_add_entry = true;
2564 *do_rem_entry = true;
2568 /* From this point, we will have matching_data && priority */
2569 if (p->acts_cnt != e->acts_cnt)
2570 *do_chg_action = true;
/* Compare the action lists pairwise; any unmatched action forces a
 * change of the existing entry's action.
 */
2571 for (i = 0; i < p->acts_cnt; i++) {
2572 bool found_not_match = false;
2574 for (j = 0; j < e->acts_cnt; j++)
2575 if (memcmp(&p->acts[i], &e->acts[j],
2576 sizeof(struct ice_flow_action))) {
2577 found_not_match = true;
2581 if (found_not_match) {
2582 *do_chg_action = true;
2587 /* (do_chg_action = true) means :
2588 * matching_data && priority && !result_action
2589 * (do_chg_action = false) means :
2590 * matching_data && priority && result_action
2595 return return_entry;
2599 * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
 * @p: flow-rule priority value to be mapped to an ACL entry priority
2602 static enum ice_acl_entry_prior
2603 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2605 enum ice_acl_entry_prior acl_prior;
2608 case ICE_FLOW_PRIO_LOW:
2609 acl_prior = ICE_LOW;
2611 case ICE_FLOW_PRIO_NORMAL:
2612 acl_prior = ICE_NORMAL;
2614 case ICE_FLOW_PRIO_HIGH:
2615 acl_prior = ICE_HIGH;
/* Unrecognized flow priorities fall back to normal ACL priority */
2618 acl_prior = ICE_NORMAL;
2626 * ice_flow_acl_union_rng_chk - Perform union operation between two
2627 * range checker buffers
2628 * @dst_buf: pointer to destination range checker buffer
2629 * @src_buf: pointer to source range checker buffer
2631 * For this function, we do the union between dst_buf and src_buf
2632 * range checker buffer, and we will save the result back to dst_buf
2634 static enum ice_status
2635 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2636 struct ice_aqc_acl_profile_ranges *src_buf)
2640 if (!dst_buf || !src_buf)
2641 return ICE_ERR_BAD_PTR;
2643 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2644 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2645 bool will_populate = false;
2647 in_data = &src_buf->checker_cfg[i];
/* Find a destination slot that is either free (mask == 0) or already
 * holds an identical checker configuration.
 */
2652 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2653 cfg_data = &dst_buf->checker_cfg[j];
2655 if (!cfg_data->mask ||
2656 !memcmp(cfg_data, in_data,
2657 sizeof(struct ice_acl_rng_data))) {
2658 will_populate = true;
2663 if (will_populate) {
2664 ice_memcpy(cfg_data, in_data,
2665 sizeof(struct ice_acl_rng_data),
2666 ICE_NONDMA_TO_NONDMA);
2668 /* No available slot left to program range checker */
2669 return ICE_ERR_MAX_LIMIT;
2677 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2678 * @hw: pointer to the hardware structure
2679 * @prof: pointer to flow profile
2680 * @entry: double pointer to the flow entry
2682 * For this function, we will look at the current added entries in the
2683 * corresponding ACL scenario. Then, we will perform matching logic to
2684 * see if we want to add/modify/do nothing with this new entry.
2686 static enum ice_status
2687 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2688 struct ice_flow_entry **entry)
2690 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2691 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2692 struct ice_acl_act_entry *acts = NULL;
2693 struct ice_flow_entry *exist;
2694 enum ice_status status = ICE_SUCCESS;
2695 struct ice_flow_entry *e;
2698 if (!entry || !(*entry) || !prof)
2699 return ICE_ERR_BAD_PTR;
2703 do_chg_rng_chk = false;
2707 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2712 /* Query the current range-checker value in FW */
2713 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2717 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2718 sizeof(struct ice_aqc_acl_profile_ranges),
2719 ICE_NONDMA_TO_NONDMA);
2721 /* Generate the new range-checker value */
2722 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2726 /* Reconfigure the range check if the buffer is changed. */
2727 do_chg_rng_chk = false;
2728 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2729 sizeof(struct ice_aqc_acl_profile_ranges))) {
2730 status = ice_prog_acl_prof_ranges(hw, prof_id,
2731 &cfg_rng_buf, NULL);
2735 do_chg_rng_chk = true;
2739 /* Figure out if we want to (change the ACL action) and/or
2740 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2742 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2743 &do_add_entry, &do_rem_entry);
2746 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2751 /* Prepare the result action buffer */
/* NOTE(review): the element count here is e->entry_sz (key size in
 * bytes), not e->acts_cnt — this looks like an over-allocation;
 * confirm the intended count against the copy loop below.
 */
2752 acts = (struct ice_acl_act_entry *)ice_calloc
2753 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2754 for (i = 0; i < e->acts_cnt; i++)
2755 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2756 sizeof(struct ice_acl_act_entry),
2757 ICE_NONDMA_TO_NONDMA);
2760 enum ice_acl_entry_prior prior;
/* The entry buffer holds the key followed by its inverse */
2764 keys = (u8 *)e->entry;
2765 inverts = keys + (e->entry_sz / 2);
2766 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2768 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2769 inverts, acts, e->acts_cnt,
2774 e->scen_entry_idx = entry_idx;
2775 LIST_ADD(&e->l_entry, &prof->entries);
2777 if (do_chg_action) {
2778 /* For the action memory info, update the SW's copy of
2779 * exist entry with e's action memory info
2781 ice_free(hw, exist->acts);
2782 exist->acts_cnt = e->acts_cnt;
2783 exist->acts = (struct ice_flow_action *)
2784 ice_calloc(hw, exist->acts_cnt,
2785 sizeof(struct ice_flow_action));
2788 status = ICE_ERR_NO_MEMORY;
2792 ice_memcpy(exist->acts, e->acts,
2793 sizeof(struct ice_flow_action) * e->acts_cnt,
2794 ICE_NONDMA_TO_NONDMA);
2796 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2798 exist->scen_entry_idx);
2803 if (do_chg_rng_chk) {
2804 /* In this case, we want to update the range checker
2805 * information of the exist entry
2807 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2813 /* As we don't add the new entry to our SW DB, deallocate its
2814 * memories, and return the exist entry to the caller
2816 ice_dealloc_flow_entry(hw, e);
2827 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2828 * @hw: pointer to the hardware structure
2829 * @prof: pointer to flow profile
2830 * @e: double pointer to the flow entry
2832 static enum ice_status
2833 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2834 struct ice_flow_entry **e)
2836 enum ice_status status;
/* Serialize scenario updates with the profile's entry-list lock */
2838 ice_acquire_lock(&prof->entries_lock);
2839 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2840 ice_release_lock(&prof->entries_lock);
2846 * ice_flow_add_entry - Add a flow entry
2847 * @hw: pointer to the HW struct
2848 * @blk: classification stage
2849 * @prof_id: ID of the profile to add a new flow entry to
2850 * @entry_id: unique ID to identify this flow entry
2851 * @vsi_handle: software VSI handle for the flow entry
2852 * @prio: priority of the flow entry
2853 * @data: pointer to a data buffer containing flow entry's match values/masks
2854 * @acts: arrays of actions to be performed on a match
2855 * @acts_cnt: number of actions
2856 * @entry_h: pointer to buffer that receives the new flow entry's handle
2859 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2860 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2861 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2864 struct ice_flow_entry *e = NULL;
2865 struct ice_flow_prof *prof;
2866 enum ice_status status = ICE_SUCCESS;
2868 /* ACL entries must indicate an action */
2869 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2870 return ICE_ERR_PARAM;
2872 /* No flow entry data is expected for RSS */
2873 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2874 return ICE_ERR_BAD_PTR;
2876 if (!ice_is_vsi_valid(hw, vsi_handle))
2877 return ICE_ERR_PARAM;
2879 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2881 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2883 status = ICE_ERR_DOES_NOT_EXIST;
2885 /* Allocate memory for the entry being added and associate
2886 * the VSI to the found flow profile
2888 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2890 status = ICE_ERR_NO_MEMORY;
2892 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2895 ice_release_lock(&hw->fl_profs_locks[blk]);
2900 e->vsi_handle = vsi_handle;
2909 /* ACL will handle the entry management */
2910 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2915 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Blocks without entry support report not-implemented */
2921 status = ICE_ERR_NOT_IMPL;
2925 if (blk != ICE_BLK_ACL) {
2926 /* ACL will handle the entry management */
2927 ice_acquire_lock(&prof->entries_lock);
2928 LIST_ADD(&e->l_entry, &prof->entries);
2929 ice_release_lock(&prof->entries_lock);
/* Hand the opaque entry handle back to the caller */
2932 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2937 ice_free(hw, e->entry);
2945 * ice_flow_rem_entry - Remove a flow entry
2946 * @hw: pointer to the HW struct
2947 * @blk: classification stage
2948 * @entry_h: handle to the flow entry to be removed
2950 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2953 struct ice_flow_entry *entry;
2954 struct ice_flow_prof *prof;
2955 enum ice_status status = ICE_SUCCESS;
2957 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2958 return ICE_ERR_PARAM;
/* Convert the opaque handle back to the entry pointer */
2960 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2962 /* Retain the pointer to the flow profile as the entry will be freed */
2966 ice_acquire_lock(&prof->entries_lock);
2967 status = ice_flow_rem_entry_sync(hw, blk, entry);
2968 ice_release_lock(&prof->entries_lock);
2975 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2976 * @seg: packet segment the field being set belongs to
2977 * @fld: field to be set
2978 * @field_type: type of the field
2979 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2980 * entry's input buffer
2981 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2983 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2984 * entry's input buffer
2986 * This helper function stores information of a field being matched, including
2987 * the type of the field and the locations of the value to match, the mask, and
2988 * the upper-bound value in the start of the input buffer for a flow entry.
2989 * This function should only be used for fixed-size data structures.
2991 * This function also opportunistically determines the protocol headers to be
2992 * present based on the fields being set. Some fields cannot be used alone to
2993 * determine the protocol headers present. Sometimes, fields for particular
2994 * protocol headers are not matched. In those cases, the protocol headers
2995 * must be explicitly set.
2998 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2999 enum ice_flow_fld_match_type field_type, u16 val_loc,
3000 u16 mask_loc, u16 last_loc)
3002 u64 bit = BIT_ULL(fld);
3005 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the field's match type and input-buffer source locations */
3008 seg->fields[fld].type = field_type;
3009 seg->fields[fld].src.val = val_loc;
3010 seg->fields[fld].src.mask = mask_loc;
3011 seg->fields[fld].src.last = last_loc;
/* Record the protocol header implied by this field on the segment */
3013 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3017 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3018 * @seg: packet segment the field being set belongs to
3019 * @fld: field to be set
3020 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3021 * entry's input buffer
3022 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3024 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3025 * entry's input buffer
3026 * @range: indicate if field being matched is to be in a range
3028 * This function specifies the locations, in the form of byte offsets from the
3029 * start of the input buffer for a flow entry, from where the value to match,
3030 * the mask value, and upper value can be extracted. These locations are then
3031 * stored in the flow profile. When adding a flow entry associated with the
3032 * flow profile, these locations will be used to quickly extract the values and
3033 * create the content of a match entry. This function should only be used for
3034 * fixed-size data structures.
3037 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3038 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* A ranged match uses the RANGE field type; otherwise a regular match */
3040 enum ice_flow_fld_match_type t = range ?
3041 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3043 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3047 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3048 * @seg: packet segment the field being set belongs to
3049 * @fld: field to be set
3050 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3051 * entry's input buffer
3052 * @pref_loc: location of prefix value from entry's input buffer
3053 * @pref_sz: size of the location holding the prefix value
3055 * This function specifies the locations, in the form of byte offsets from the
3056 * start of the input buffer for a flow entry, from where the value to match
3057 * and the IPv4 prefix value can be extracted. These locations are then stored
3058 * in the flow profile. When adding flow entries to the associated flow profile,
3059 * these locations can be used to quickly extract the values to create the
3060 * content of a match entry. This function should only be used for fixed-size
3064 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3065 u16 val_loc, u16 pref_loc, u8 pref_sz)
3067 /* For this type of field, the "mask" location is for the prefix value's
3068 * location and the "last" location is for the size of the location of
3071 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3072 pref_loc, (u16)pref_sz);
3076 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3077 * @seg: packet segment the field being set belongs to
3078 * @off: offset of the raw field from the beginning of the segment in bytes
3079 * @len: length of the raw pattern to be matched
3080 * @val_loc: location of the value to match from entry's input buffer
3081 * @mask_loc: location of mask value from entry's input buffer
3083 * This function specifies the offset of the raw field to be match from the
3084 * beginning of the specified packet segment, and the locations, in the form of
3085 * byte offsets from the start of the input buffer for a flow entry, from where
3086 * the value to match and the mask value to be extracted. These locations are
3087 * then stored in the flow profile. When adding flow entries to the associated
3088 * flow profile, these locations can be used to quickly extract the values to
3089 * create the content of a match entry. This function should only be used for
3090 * fixed-size data structures.
3093 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3094 u16 val_loc, u16 mask_loc)
/* Append the raw match descriptor only while there is room */
3096 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3097 seg->raws[seg->raws_cnt].off = off;
3098 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3099 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3100 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3101 /* The "last" field is used to store the length of the field */
3102 seg->raws[seg->raws_cnt].info.src.last = len;
3105 /* Overflows of "raws" will be handled as an error condition later in
3106 * the flow when this information is processed.
/* Protocol-header bitmasks used to validate RSS segment configurations:
 * L2 (Ethernet/VLAN), L3 (IPv4/IPv6) and L4 (TCP/UDP/SCTP) groups, plus
 * the union of all three.
 */
3111 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3112 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3114 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3115 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3117 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3118 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3119 ICE_FLOW_SEG_HDR_SCTP)
3121 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3122 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3123 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3124 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3127 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3128 * @segs: pointer to the flow field segment(s)
3129 * @hash_fields: fields to be hashed on for the segment(s)
3130 * @flow_hdr: protocol header fields within a packet segment
3132 * Helper function to extract fields from hash bitmap and use flow
3133 * header value to set flow field segment for further use in flow
3134 * profile entry or removal.
3136 static enum ice_status
3137 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3140 u64 val = hash_fields;
/* Mark each hashed field on the segment; RSS needs no entry-buffer
 * locations, hence the ICE_FLOW_FLD_OFF_INVAL placeholders.
 */
3143 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3144 u64 bit = BIT_ULL(i);
3147 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3148 ICE_FLOW_FLD_OFF_INVAL,
3149 ICE_FLOW_FLD_OFF_INVAL,
3150 ICE_FLOW_FLD_OFF_INVAL, false);
3154 ICE_FLOW_SET_HDRS(segs, flow_hdr);
/* Reject any header bits outside the supported RSS set */
3156 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3157 ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3158 return ICE_ERR_PARAM;
/* At most one L3 and one L4 header type may be selected */
3160 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3161 if (val && !ice_is_pow2(val))
3164 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3165 if (val && !ice_is_pow2(val))
3172 * ice_rem_vsi_rss_list - remove VSI from RSS list
3173 * @hw: pointer to the hardware structure
3174 * @vsi_handle: software VSI handle
3176 * Remove the VSI from all RSS configurations in the list.
3178 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3180 struct ice_rss_cfg *r, *tmp;
3182 if (LIST_EMPTY(&hw->rss_list_head))
3185 ice_acquire_lock(&hw->rss_locks);
3186 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3187 ice_rss_cfg, l_entry)
/* Drop the VSI from each config; delete configs left with no VSIs */
3188 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3189 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3190 LIST_DEL(&r->l_entry);
3193 ice_release_lock(&hw->rss_locks);
3197 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3198 * @hw: pointer to the hardware structure
3199 * @vsi_handle: software VSI handle
3201 * This function will iterate through all flow profiles and disassociate
3202 * the VSI from that profile. If the flow profile has no VSIs it will
3205 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3207 const enum ice_block blk = ICE_BLK_RSS;
3208 struct ice_flow_prof *p, *t;
3209 enum ice_status status = ICE_SUCCESS;
3211 if (!ice_is_vsi_valid(hw, vsi_handle))
3212 return ICE_ERR_PARAM;
3214 if (LIST_EMPTY(&hw->fl_profs[blk]))
/* Under the RSS profile-list lock, detach the VSI from each profile
 * it belongs to; a profile left with no VSIs is removed entirely.
 */
3217 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3218 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3220 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3221 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3225 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3226 status = ice_flow_rem_prof_sync(hw, blk, p);
3231 ice_release_lock(&hw->fl_profs_locks[blk]);
3237 * ice_rem_rss_list - remove RSS configuration from list
3238 * @hw: pointer to the hardware structure
3239 * @vsi_handle: software VSI handle
3240 * @prof: pointer to flow profile
3242 * Assumption: lock has already been acquired for RSS list
3245 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3247 struct ice_rss_cfg *r, *tmp;
3249 /* Search for RSS hash fields associated to the VSI that match the
3250 * hash configurations associated to the flow profile. If found
3251 * remove from the RSS entry list of the VSI context and delete entry.
/* Matching is done against the profile's LAST segment: its match
 * fields and header flags identify the RSS configuration.
 */
3253 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3254 ice_rss_cfg, l_entry)
3255 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3256 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3257 ice_clear_bit(vsi_handle, r->vsis);
/* Entry no longer referenced by any VSI - unlink it. */
3258 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3259 LIST_DEL(&r->l_entry);
3267 * ice_add_rss_list - add RSS configuration to list
3268 * @hw: pointer to the hardware structure
3269 * @vsi_handle: software VSI handle
3270 * @prof: pointer to flow profile
3272 * Assumption: lock has already been acquired for RSS list
3274 static enum ice_status
3275 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3277 struct ice_rss_cfg *r, *rss_cfg;
/* If an entry already exists for this profile's hash fields and
 * headers (keyed on the last segment), just add the VSI to it.
 */
3279 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3280 ice_rss_cfg, l_entry)
3281 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3282 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3283 ice_set_bit(vsi_handle, r->vsis);
/* No matching entry - allocate a new one, record the hash fields,
 * header flags and symmetric-hash setting, and mark this VSI.
 */
3287 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3289 return ICE_ERR_NO_MEMORY;
3291 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3292 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3293 rss_cfg->symm = prof->cfg.symm;
3294 ice_set_bit(vsi_handle, rss_cfg->vsis);
3296 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit layout of the 64-bit RSS flow profile ID (see format note below). */
3301 #define ICE_FLOW_PROF_HASH_S 0
3302 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3303 #define ICE_FLOW_PROF_HDR_S 32
3304 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3305 #define ICE_FLOW_PROF_ENCAP_S 63
3306 #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
/* Segment counts: 1 = outer headers only, 2 = tunneled/inner headers. */
3308 #define ICE_RSS_OUTER_HEADERS 1
3309 #define ICE_RSS_INNER_HEADERS 2
3311 /* Flow profile ID format:
3312 * [0:31] - Packet match fields
3313 * [32:62] - Protocol header
3314 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
/* The encap bit is set whenever segs_cnt > 1, i.e. the configuration
 * targets inner (tunneled) headers.
 */
3316 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3317 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3318 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3319 ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
/* ice_rss_config_xor_word - program one symmetric-hash byte field
 * Writes field-vector word index @dst (with its enable bit, 0x80) into
 * the GLQF_HSYMM slot for word @src of profile @prof_id, telling the HW
 * to XOR the two words when computing the symmetric hash.
 */
3322 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
/* Each GLQF_HSYMM register packs four byte-wide fields: 'i' selects
 * the register, 's' the byte lane within it.
 */
3324 u32 s = ((src % 4) << 3); /* byte shift */
3325 u32 v = dst | 0x80; /* value to program */
3326 u8 i = src / 4; /* register index */
/* Read-modify-write so only the targeted byte of the register changes. */
3329 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3330 reg = (reg & ~(0xff << s)) | (v << s);
3331 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* ice_rss_config_xor - set up symmetric XOR for a src/dst field pair
 * Programs @len consecutive field-vector words starting at @src and
 * @dst to be XOR-ed with each other (both directions) for @prof_id.
 */
3335 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3338 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
/* Program each word pair in both directions so the src<->dst swap
 * yields the same hash.
 */
3341 for (i = 0; i < len; i++) {
3342 ice_rss_config_xor_word(hw, prof_id,
3343 /* Yes, field vector in GLQF_HSYMM and
3344 * GLQF_HINSET is inversed!
3346 fv_last_word - (src + i),
3347 fv_last_word - (dst + i));
3348 ice_rss_config_xor_word(hw, prof_id,
3349 fv_last_word - (dst + i),
3350 fv_last_word - (src + i));
/* ice_rss_update_symm - (re)program symmetric-hash registers for a profile
 * Clears the profile's GLQF_HSYMM configuration and, when symmetric
 * hashing is enabled, sets up src<->dst XOR for every address/port pair
 * that was actually extracted into the field vector.
 */
3355 ice_rss_update_symm(struct ice_hw *hw,
3356 struct ice_flow_prof *prof)
3358 struct ice_prof_map *map;
3361 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
/* NOTE(review): map is dereferenced without a NULL check; confirm a
 * matching profile-map entry is guaranteed to exist at this point.
 */
3362 prof_id = map->prof_id;
3364 /* clear to default */
3365 for (m = 0; m < 6; m++)
3366 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3367 if (prof->cfg.symm) {
/* All checks below operate on the profile's last segment (the
 * innermost one for tunneled configurations).
 */
3368 struct ice_flow_seg_info *seg =
3369 &prof->segs[prof->segs_cnt - 1];
3371 struct ice_flow_seg_xtrct *ipv4_src =
3372 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3373 struct ice_flow_seg_xtrct *ipv4_dst =
3374 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3375 struct ice_flow_seg_xtrct *ipv6_src =
3376 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3377 struct ice_flow_seg_xtrct *ipv6_dst =
3378 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3380 struct ice_flow_seg_xtrct *tcp_src =
3381 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3382 struct ice_flow_seg_xtrct *tcp_dst =
3383 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3385 struct ice_flow_seg_xtrct *udp_src =
3386 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3387 struct ice_flow_seg_xtrct *udp_dst =
3388 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3390 struct ice_flow_seg_xtrct *sctp_src =
3391 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3392 struct ice_flow_seg_xtrct *sctp_dst =
3393 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* prot_id == 0 means the field was not extracted for this profile;
 * only pair fields that are both present. Lengths appear to be in
 * field-vector words (IPv4 addr = 2, IPv6 addr = 8, port = 1).
 */
3396 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3397 ice_rss_config_xor(hw, prof_id,
3398 ipv4_src->idx, ipv4_dst->idx, 2);
3401 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3402 ice_rss_config_xor(hw, prof_id,
3403 ipv6_src->idx, ipv6_dst->idx, 8);
3406 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3407 ice_rss_config_xor(hw, prof_id,
3408 tcp_src->idx, tcp_dst->idx, 1);
3411 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3412 ice_rss_config_xor(hw, prof_id,
3413 udp_src->idx, udp_dst->idx, 1);
3416 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3417 ice_rss_config_xor(hw, prof_id,
3418 sctp_src->idx, sctp_dst->idx, 1);
3423 * ice_add_rss_cfg_sync - add an RSS configuration
3424 * @hw: pointer to the hardware structure
3425 * @vsi_handle: software VSI handle
3426 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3427 * @addl_hdrs: protocol header fields
3428 * @segs_cnt: packet segment count
3429 * @symm: symmetric hash enable/disable
3431 * Assumption: lock has already been acquired for RSS list
3433 static enum ice_status
3434 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3435 u32 addl_hdrs, u8 segs_cnt, bool symm)
3437 const enum ice_block blk = ICE_BLK_RSS;
3438 struct ice_flow_prof *prof = NULL;
3439 struct ice_flow_seg_info *segs;
3440 enum ice_status status;
3442 if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3443 return ICE_ERR_PARAM;
3445 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3448 return ICE_ERR_NO_MEMORY;
3450 /* Construct the packet segment info from the hashed fields */
/* Only the last (innermost) segment carries the hash configuration. */
3451 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3456 /* Search for a flow profile that has matching headers, hash fields
3457 * and has the input VSI associated to it. If found, no further
3458 * operations required and exit.
3460 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3462 ICE_FLOW_FIND_PROF_CHK_FLDS |
3463 ICE_FLOW_FIND_PROF_CHK_VSI);
/* Same profile found: only the symmetric setting may need updating. */
3465 if (prof->cfg.symm == symm)
3467 prof->cfg.symm = symm;
3471 /* Check if a flow profile exists with the same protocol headers and
3472 * associated with the input VSI. If so disassociate the VSI from
3473 * this profile. The VSI will be added to a new profile created with
3474 * the protocol header and new hash field configuration.
3476 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3477 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3479 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3481 ice_rem_rss_list(hw, vsi_handle, prof);
3485 /* Remove profile if it has no VSIs associated */
3486 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3487 status = ice_flow_rem_prof(hw, blk, prof->id);
3493 /* Search for a profile that has same match fields only. If this
3494 * exists then associate the VSI to this profile.
3496 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3498 ICE_FLOW_FIND_PROF_CHK_FLDS);
3500 if (prof->cfg.symm == symm) {
3501 status = ice_flow_assoc_prof(hw, blk, prof,
3504 status = ice_add_rss_list(hw, vsi_handle,
3507 /* if a profile exist but with different symmetric
3508 * requirement, just return error.
3510 status = ICE_ERR_NOT_SUPPORTED;
3515 /* Create a new flow profile with generated profile and packet
3516 * segment information.
3518 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3519 ICE_FLOW_GEN_PROFID(hashed_flds,
3520 segs[segs_cnt - 1].hdrs,
3522 segs, segs_cnt, NULL, 0, &prof);
3526 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3527 /* If association to a new flow profile failed then this profile can
3531 ice_flow_rem_prof(hw, blk, prof->id);
/* Record the new configuration and program the symmetric-hash
 * registers for the freshly created profile.
 */
3535 status = ice_add_rss_list(hw, vsi_handle, prof);
3537 prof->cfg.symm = symm;
3540 ice_rss_update_symm(hw, prof);
3548 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3549 * @hw: pointer to the hardware structure
3550 * @vsi_handle: software VSI handle
3551 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3552 * @addl_hdrs: protocol header fields
3553 * @symm: symmetric hash enable/disable
3555 * This function will generate a flow profile based on fields associated with
3556 * the input fields to hash on, the flow type and use the VSI number to add
3557 * a flow entry to the profile.
3560 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3561 u32 addl_hdrs, bool symm)
3563 enum ice_status status;
3565 if (hashed_flds == ICE_HASH_INVALID ||
3566 !ice_is_vsi_valid(hw, vsi_handle))
3567 return ICE_ERR_PARAM;
/* Configure the outer-header profile first, then the inner-header
 * (tunneled) profile, both under the RSS lock.
 */
3569 ice_acquire_lock(&hw->rss_locks);
3570 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3571 ICE_RSS_OUTER_HEADERS, symm);
3573 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3574 addl_hdrs, ICE_RSS_INNER_HEADERS,
3576 ice_release_lock(&hw->rss_locks);
3582 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3583 * @hw: pointer to the hardware structure
3584 * @vsi_handle: software VSI handle
3585 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3586 * @addl_hdrs: Protocol header fields within a packet segment
3587 * @segs_cnt: packet segment count
3589 * Assumption: lock has already been acquired for RSS list
3591 static enum ice_status
3592 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3593 u32 addl_hdrs, u8 segs_cnt)
3595 const enum ice_block blk = ICE_BLK_RSS;
3596 struct ice_flow_seg_info *segs;
3597 struct ice_flow_prof *prof;
3598 enum ice_status status;
3600 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3603 return ICE_ERR_NO_MEMORY;
3605 /* Construct the packet segment info from the hashed fields */
3606 status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
/* The profile to remove must match both headers and hash fields. */
3611 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3613 ICE_FLOW_FIND_PROF_CHK_FLDS);
3615 status = ICE_ERR_DOES_NOT_EXIST;
3619 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3623 /* Remove RSS configuration from VSI context before deleting
3626 ice_rem_rss_list(hw, vsi_handle, prof);
/* Delete the profile once no VSI references it. */
3628 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3629 status = ice_flow_rem_prof(hw, blk, prof->id);
3637 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3638 * @hw: pointer to the hardware structure
3639 * @vsi_handle: software VSI handle
3640 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3641 * @addl_hdrs: Protocol header fields within a packet segment
3643 * This function will lookup the flow profile based on the input
3644 * hash field bitmap, iterate through the profile entry list of
3645 * that profile and find entry associated with input VSI to be
3646 * removed. Calls are made to underlying flow apis which will in
3647 * turn build or update buffers for RSS XLT1 section.
3650 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3653 enum ice_status status;
3655 if (hashed_flds == ICE_HASH_INVALID ||
3656 !ice_is_vsi_valid(hw, vsi_handle))
3657 return ICE_ERR_PARAM;
/* Mirror ice_add_rss_cfg(): remove the outer-header profile, then the
 * inner-header one, both under the RSS lock.
 */
3659 ice_acquire_lock(&hw->rss_locks);
3660 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3661 ICE_RSS_OUTER_HEADERS);
3663 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3664 addl_hdrs, ICE_RSS_INNER_HEADERS);
3665 ice_release_lock(&hw->rss_locks);
3671 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3672 * @hw: pointer to the hardware structure
3673 * @vsi_handle: software VSI handle
3675 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3677 enum ice_status status = ICE_SUCCESS;
3678 struct ice_rss_cfg *r;
3680 if (!ice_is_vsi_valid(hw, vsi_handle))
3681 return ICE_ERR_PARAM;
/* Re-apply every recorded RSS configuration that references this VSI
 * (outer headers first, then inner), e.g. after a reset.
 */
3683 ice_acquire_lock(&hw->rss_locks);
3684 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3685 ice_rss_cfg, l_entry) {
3686 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3687 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3690 ICE_RSS_OUTER_HEADERS,
3694 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3697 ICE_RSS_INNER_HEADERS,
3703 ice_release_lock(&hw->rss_locks);
3709 * ice_get_rss_cfg - returns hashed fields for the given header types
3710 * @hw: pointer to the hardware structure
3711 * @vsi_handle: software VSI handle
3712 * @hdrs: protocol header type
3714 * This function will return the match fields of the first instance of flow
3715 * profile having the given header types and containing input VSI
3717 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3719 struct ice_rss_cfg *r, *rss_cfg = NULL;
3721 /* verify if the protocol header is non zero and VSI is valid */
3722 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3723 return ICE_HASH_INVALID;
/* First entry matching both the VSI and the exact header set wins. */
3725 ice_acquire_lock(&hw->rss_locks);
3726 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3727 ice_rss_cfg, l_entry)
3728 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3729 r->packet_hdr == hdrs) {
3733 ice_release_lock(&hw->rss_locks);
/* No match recorded for this VSI/header combination. */
3735 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;